package lnd

import (
	"bytes"
	"crypto/tls"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"math"
	"net/http"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/routing/route"
	"github.com/lightningnetwork/lnd/watchtower"

	"github.com/btcsuite/btcd/blockchain"
	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcwallet/waddrmgr"
	"github.com/btcsuite/btcwallet/wallet/txauthor"
	"github.com/coreos/bbolt"
	"github.com/davecgh/go-spew/spew"
	"github.com/grpc-ecosystem/go-grpc-middleware"
	proxy "github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/lightningnetwork/lnd/autopilot"
	"github.com/lightningnetwork/lnd/build"
	"github.com/lightningnetwork/lnd/chanbackup"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/channelnotifier"
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/invoices"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/macaroons"
	"github.com/lightningnetwork/lnd/monitoring"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/lightningnetwork/lnd/signal"
	"github.com/lightningnetwork/lnd/sweep"
	"github.com/lightningnetwork/lnd/zpay32"
	"github.com/tv42/zbase32"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"gopkg.in/macaroon-bakery.v2/bakery"
)

const (
	// maxBtcPaymentMSat is the maximum allowed Bitcoin payment currently
	// permitted as defined in BOLT-0002.
	maxBtcPaymentMSat = lnwire.MilliSatoshi(math.MaxUint32)

	// maxLtcPaymentMSat is the maximum allowed Litecoin payment currently
	// permitted.
	maxLtcPaymentMSat = lnwire.MilliSatoshi(math.MaxUint32) *
		btcToLtcConversionRate
)
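
// Since lnwire.MilliSatoshi counts thousandths of a satoshi, the cap above
// works out to math.MaxUint32 = 4,294,967,295 msat, i.e. roughly 4,294,967
// satoshis, or about 0.043 BTC per payment.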

var (
	// MaxPaymentMSat is the maximum allowed payment currently permitted as
	// defined in BOLT-0002. This value depends on which chain is active.
	// It is set to the value under the Bitcoin chain as default.
	MaxPaymentMSat = maxBtcPaymentMSat

	defaultAccount uint32 = waddrmgr.DefaultAccountNum

	// readPermissions is a slice of all entities that allow read
	// permissions for authorization purposes, all lowercase.
	readPermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "read",
		},
		{
			Entity: "offchain",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "message",
			Action: "read",
		},
		{
			Entity: "peers",
			Action: "read",
		},
		{
			Entity: "info",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "read",
		},
	}

	// writePermissions is a slice of all entities that allow write
	// permissions for authorization purposes, all lowercase.
	writePermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "write",
		},
		{
			Entity: "offchain",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "write",
		},
		{
			Entity: "message",
			Action: "write",
		},
		{
			Entity: "peers",
			Action: "write",
		},
		{
			Entity: "info",
			Action: "write",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "signer",
			Action: "generate",
		},
	}

	// invoicePermissions is a slice of all the entities that allow a user
	// to only access calls that are related to invoices, so: streaming
	// RPCs, and generating, listing, and looking up invoices.
	invoicePermissions = []bakery.Op{
		{
			Entity: "invoices",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "write",
		},
	}
)
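
// As an illustration of how these permission slices are consumed, a
// read-only macaroon covering exactly the entities in readPermissions could
// be baked through the bakery like so (a sketch only: the *bakery.Bakery
// instance named bkry is assumed, and lnd actually performs this through its
// macaroon service rather than directly):
//
//	mac, err := bkry.Oven.NewMacaroon(
//		ctx, bakery.LatestVersion, nil, readPermissions...,
//	)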

// mainRPCServerPermissions returns a mapping of the main RPC server calls to
// the permissions they require.
func mainRPCServerPermissions() map[string][]bakery.Op {
	return map[string][]bakery.Op{
		"/lnrpc.Lightning/SendCoins": {{
			Entity: "onchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ListUnspent": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SendMany": {{
			Entity: "onchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/NewAddress": {{
			Entity: "address",
			Action: "write",
		}},
		"/lnrpc.Lightning/SignMessage": {{
			Entity: "message",
			Action: "write",
		}},
		"/lnrpc.Lightning/VerifyMessage": {{
			Entity: "message",
			Action: "read",
		}},
		"/lnrpc.Lightning/ConnectPeer": {{
			Entity: "peers",
			Action: "write",
		}},
		"/lnrpc.Lightning/DisconnectPeer": {{
			Entity: "peers",
			Action: "write",
		}},
		"/lnrpc.Lightning/OpenChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/OpenChannelSync": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/CloseChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/AbandonChannel": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/GetInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListPeers": {{
			Entity: "peers",
			Action: "read",
		}},
		"/lnrpc.Lightning/WalletBalance": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/EstimateFee": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ChannelBalance": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/PendingChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeChannelEvents": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ClosedChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SendPayment": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendPaymentSync": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendToRoute": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendToRouteSync": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/AddInvoice": {{
			Entity: "invoices",
			Action: "write",
		}},
		"/lnrpc.Lightning/LookupInvoice": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListInvoices": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeInvoices": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeTransactions": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetTransactions": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/DescribeGraph": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetChanInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNodeInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/QueryRoutes": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNetworkInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/StopDaemon": {{
			Entity: "info",
			Action: "write",
		}},
		"/lnrpc.Lightning/SubscribeChannelGraph": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListPayments": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/DeleteAllPayments": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/DebugLevel": {{
			Entity: "info",
			Action: "write",
		}},
		"/lnrpc.Lightning/DecodePayReq": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/FeeReport": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/UpdateChannelPolicy": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ForwardingHistory": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/RestoreChannelBackups": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ExportChannelBackup": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/VerifyChanBackup": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ExportAllChannelBackups": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeChannelBackups": {{
			Entity: "offchain",
			Action: "read",
		}},
	}
}
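
// A gRPC interceptor can consult the map above to enforce macaroon
// permissions per call. A minimal sketch, assuming a hypothetical
// validateMacaroon helper in the spirit of the macaroon service's checker
// (not the exact wiring lnd uses below):
//
//	permissions := mainRPCServerPermissions()
//	unaryInterceptor := func(ctx context.Context, req interface{},
//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (
//		interface{}, error) {
//
//		if ops, ok := permissions[info.FullMethod]; ok {
//			if err := validateMacaroon(ctx, ops); err != nil {
//				return nil, err
//			}
//		}
//		return handler(ctx, req)
//	}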

// rpcServer is a gRPC, RPC front end to the lnd daemon.
// TODO(roasbeef): pagination support for the list-style calls
type rpcServer struct {
	started  int32 // To be used atomically.
	shutdown int32 // To be used atomically.

	server *server

	wg sync.WaitGroup

	// subServers are a set of sub-RPC servers that use the same gRPC and
	// listening sockets as the main RPC server, but which maintain their
	// own independent service. This allows us to expose a set of
	// micro-service like abstractions to the outside world for users to
	// consume.
	subServers []lnrpc.SubServer

	// grpcServer is the main gRPC server that this RPC server and all the
	// sub-servers use to register themselves and accept client requests.
	grpcServer *grpc.Server

	// listenerCleanUp is a set of closures that allow this main RPC
	// server to clean up all the listening sockets created for the
	// server.
	listenerCleanUp []func()

	// restDialOpts are a set of gRPC dial options that the REST server
	// proxy will use to connect to the main gRPC server.
	restDialOpts []grpc.DialOption

	// restProxyDest is the address to forward REST requests to.
	restProxyDest string

	// tlsCfg is the TLS config that allows the REST server proxy to
	// connect to the main gRPC server to proxy all incoming requests.
	tlsCfg *tls.Config

	// routerBackend contains the backend implementation of the router
	// RPC sub-server.
	routerBackend *routerrpc.RouterBackend

	quit chan struct{}
}
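
// The started/shutdown flags above guard Start/Stop against running more
// than once. A sketch of the usual pattern (the concrete Start/Stop bodies
// live further down in this file and may differ in detail):
//
//	func (r *rpcServer) Start() error {
//		if atomic.AddInt32(&r.started, 1) != 1 {
//			return nil
//		}
//		// ... bring up gRPC, the REST proxy, and all sub-servers ...
//		return nil
//	}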

// A compile time check to ensure that rpcServer fully implements the
// LightningServer gRPC service.
var _ lnrpc.LightningServer = (*rpcServer)(nil)

// newRPCServer creates and returns a new instance of the rpcServer. The
// rpcServer will handle creating all listening sockets needed by it, and any
// of the sub-servers that it maintains. The set of serverOpts should be the
// base level options passed to the gRPC server. This typically includes
// things like requiring TLS, etc.
func newRPCServer(s *server, macService *macaroons.Service,
	subServerCgs *subRPCServerConfigs, serverOpts []grpc.ServerOption,
	restDialOpts []grpc.DialOption, restProxyDest string,
	atpl *autopilot.Manager, invoiceRegistry *invoices.InvoiceRegistry,
	tower *watchtower.Standalone, tlsCfg *tls.Config) (*rpcServer, error) {

	// Set up router rpc backend.
	channelGraph := s.chanDB.ChannelGraph()
	selfNode, err := channelGraph.SourceNode()
	if err != nil {
		return nil, err
	}
	graph := s.chanDB.ChannelGraph()
	routerBackend := &routerrpc.RouterBackend{
		MaxPaymentMSat: MaxPaymentMSat,
		SelfNode:       selfNode.PubKeyBytes,
		FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
			error) {

			info, _, _, err := graph.FetchChannelEdgesByID(chanID)
			if err != nil {
				return 0, err
			}
			return info.Capacity, nil
		},
		FetchChannelEndpoints: func(chanID uint64) (route.Vertex,
			route.Vertex, error) {

			info, _, _, err := graph.FetchChannelEdgesByID(
				chanID,
			)
			if err != nil {
				return route.Vertex{}, route.Vertex{},
					fmt.Errorf("unable to fetch channel "+
						"edges by channel ID %d: %v",
						chanID, err)
			}

			return info.NodeKey1Bytes, info.NodeKey2Bytes, nil
		},
		FindRoute:       s.chanRouter.FindRoute,
		MissionControl:  s.missionControl,
		ActiveNetParams: activeNetParams.Params,
		Tower:           s.controlTower,
	}
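
	// As a usage sketch, a sub-server holding this backend can resolve a
	// short channel ID to its two endpoints without touching the graph
	// database directly (chanID is assumed to be in scope):
	//
	//	node1, node2, err := routerBackend.FetchChannelEndpoints(chanID)
	//	if err != nil {
	//		return err
	//	}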

	var (
		subServers     []lnrpc.SubServer
		subServerPerms []lnrpc.MacaroonPerms
	)

	// Before we create any of the sub-servers, we need to ensure that all
	// the dependencies they need are properly populated within each sub
	// server configuration struct.
	err = subServerCgs.PopulateDependencies(
		s.cc, networkDir, macService, atpl, invoiceRegistry,
		s.htlcSwitch, activeNetParams.Params, s.chanRouter,
		routerBackend, s.nodeSigner, s.chanDB, s.sweeper,
		tower,
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|

	// Now that the sub-servers have all their dependencies in place, we
	// can create each sub-server!
	registeredSubServers := lnrpc.RegisteredSubServers()
	for _, subServer := range registeredSubServers {
		subServerInstance, macPerms, err := subServer.New(subServerCgs)
		if err != nil {
			return nil, err
		}

		// We'll collect the sub-server, and also the set of
		// permissions it needs for macaroons so we can apply the
		// interceptors below.
		subServers = append(subServers, subServerInstance)
		subServerPerms = append(subServerPerms, macPerms)
	}

	// Next, we need to merge the set of sub-server macaroon permissions
	// with the main RPC server permissions so we can unite them under a
	// single set of interceptors.
	permissions := mainRPCServerPermissions()
	for _, subServerPerm := range subServerPerms {
		for method, ops := range subServerPerm {
			// For each new method:ops combo, we also ensure that
			// none of the sub-servers try to override each other.
			if _, ok := permissions[method]; ok {
				return nil, fmt.Errorf("detected duplicate "+
					"macaroon constraints for path: %v",
					method)
			}

			permissions[method] = ops
		}
	}
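
	// As an illustrative sketch only (the method path and entity/action
	// values are examples, not taken from this file), a merged entry in
	// the permissions map has the shape:
	//
	//	permissions["/signrpc.Signer/SignMessage"] = []bakery.Op{
	//		{Entity: "signer", Action: "generate"},
	//	}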

	// If macaroons aren't disabled (a non-nil service), then we'll set up
	// our set of interceptors which will allow us to handle the macaroon
	// authentication in a single location.
	macUnaryInterceptors := []grpc.UnaryServerInterceptor{}
	macStrmInterceptors := []grpc.StreamServerInterceptor{}
	if macService != nil {
		unaryInterceptor := macService.UnaryServerInterceptor(permissions)
		macUnaryInterceptors = append(macUnaryInterceptors, unaryInterceptor)

		strmInterceptor := macService.StreamServerInterceptor(permissions)
		macStrmInterceptors = append(macStrmInterceptors, strmInterceptor)
	}
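
	// Both interceptors are built from the same merged permissions map,
	// so unary and streaming RPCs are subject to identical macaroon
	// checks.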

	// Get interceptors for Prometheus to gather gRPC performance metrics.
	// If monitoring is disabled, GetPromInterceptors() will return empty
	// slices.
	promUnaryInterceptors, promStrmInterceptors := monitoring.GetPromInterceptors()

	// Concatenate the slices of unary and stream interceptors respectively.
	unaryInterceptors := append(macUnaryInterceptors, promUnaryInterceptors...)
	strmInterceptors := append(macStrmInterceptors, promStrmInterceptors...)

	// If any interceptors have been set up, add them to the server options.
	if len(unaryInterceptors) != 0 && len(strmInterceptors) != 0 {
		chainedUnary := grpc_middleware.WithUnaryServerChain(
			unaryInterceptors...,
		)
		chainedStream := grpc_middleware.WithStreamServerChain(
			strmInterceptors...,
		)
		serverOpts = append(serverOpts, chainedUnary, chainedStream)
	}
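
	// The grpc_middleware helpers above come from the go-grpc-middleware
	// package; they compose a list of interceptors into a single chained
	// server option.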

	// Finally, with all the pre-setup complete, we can create the main
	// gRPC server, and register the main lnrpc server alongside it.
	grpcServer := grpc.NewServer(serverOpts...)
	rootRPCServer := &rpcServer{
		restDialOpts:  restDialOpts,
		restProxyDest: restProxyDest,
		subServers:    subServers,
		tlsCfg:        tlsCfg,
		grpcServer:    grpcServer,
		server:        s,
		routerBackend: routerBackend,
		quit:          make(chan struct{}, 1),
	}
	lnrpc.RegisterLightningServer(grpcServer, rootRPCServer)

	// Now that the main RPC server has been registered, we'll iterate
	// through all the sub-RPC servers and register them to ensure that
	// requests are properly routed towards them.
	for _, subServer := range subServers {
		err := subServer.RegisterWithRootServer(grpcServer)
		if err != nil {
			return nil, fmt.Errorf("unable to register "+
				"sub-server %v with root: %v",
				subServer.Name(), err)
		}
	}

	return rootRPCServer, nil
}
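
// As a hedged sketch of the registration side of this flow (the field names
// follow the lnrpc.SubServerDriver pattern, but details may differ between
// releases): a sub-server package typically makes itself visible to
// lnrpc.RegisteredSubServers() from an init function, roughly:
//
//	func init() {
//		driver := &lnrpc.SubServerDriver{
//			SubServerName: "SignRPC",
//			New: func(c lnrpc.SubServerConfigDispatcher) (
//				lnrpc.SubServer, lnrpc.MacaroonPerms, error) {
//				return createNewSubServer(c)
//			},
//		}
//		if err := lnrpc.RegisterSubServer(driver); err != nil {
//			panic(err)
//		}
//	}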

// Start launches any helper goroutines required for the rpcServer to function.
func (r *rpcServer) Start() error {
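	// Ensure Start is idempotent: only the first caller proceeds, any
	// subsequent call is a no-op.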
	if atomic.AddInt32(&r.started, 1) != 1 {
		return nil
	}

	// First, we'll start all the sub-servers to ensure that they're ready
	// to take new requests in.
	//
	// TODO(roasbeef): some may require that the entire daemon be started
	// at that point
	for _, subServer := range r.subServers {
		rpcsLog.Debugf("Starting sub RPC server: %v", subServer.Name())

		if err := subServer.Start(); err != nil {
			return err
		}
	}

	// With all the sub-servers started, we'll spin up the listeners for
	// the main RPC server itself.
	for _, listener := range cfg.RPCListeners {
		lis, err := lncfg.ListenOnAddress(listener)
		if err != nil {
			ltndLog.Errorf(
				"RPC server unable to listen on %s", listener,
			)
			return err
		}

		r.listenerCleanUp = append(r.listenerCleanUp, func() {
			lis.Close()
		})

		go func() {
			rpcsLog.Infof("RPC server listening on %s", lis.Addr())
			r.grpcServer.Serve(lis)
		}()
	}
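
	// Each cleanup closure captures its own lis, and Stop invokes these
	// closures to release the listening sockets on shutdown.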

	// If Prometheus monitoring is enabled, start the Prometheus exporter.
	if cfg.Prometheus.Enabled() {
		err := monitoring.ExportPrometheusMetrics(
			r.grpcServer, cfg.Prometheus,
		)
		if err != nil {
			return err
		}
	}

	// Finally, start the REST proxy for our gRPC server above. We'll ensure
	// we direct LND to connect to its loopback address rather than a
	// wildcard to prevent certificate issues when accessing the proxy
	// externally.
	//
	// TODO(roasbeef): eventually also allow the sub-servers to themselves
	// have a REST proxy.
	mux := proxy.NewServeMux()

	err := lnrpc.RegisterLightningHandlerFromEndpoint(
		context.Background(), mux, r.restProxyDest,
		r.restDialOpts,
	)
	if err != nil {
		return err
	}
	for _, restEndpoint := range cfg.RESTListeners {
		lis, err := lncfg.TLSListenOnAddress(restEndpoint, r.tlsCfg)
		if err != nil {
			ltndLog.Errorf(
				"gRPC proxy unable to listen on %s",
				restEndpoint,
			)
			return err
		}

		r.listenerCleanUp = append(r.listenerCleanUp, func() {
			lis.Close()
		})

		go func() {
			rpcsLog.Infof("gRPC proxy started at %s", lis.Addr())
			http.Serve(lis, mux)
		}()
	}

	return nil
}

// Stop signals any active goroutines for a graceful closure.
func (r *rpcServer) Stop() error {
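	// Mirror of the guard in Start: only the first Stop call runs the
	// shutdown sequence below.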
	if atomic.AddInt32(&r.shutdown, 1) != 1 {
		return nil
	}

	rpcsLog.Infof("Stopping RPC Server")

	close(r.quit)
|
|
|
|
|
lnd+rpc: modify rpcServer to fully manaage listeners and gRPC, handle sub-servers
In this commit, we modify the existing rpcServer to fully manage the
macaroons, gRPC server, and also seek out and create all sub-servers.
With this change, the RPC server gains more responsibility, as it
becomes the "root" server in the hierarchy of gRPC sub-servers.
In addition to creating each sub-server, it will also merge the set of
macaroon permissions for each sub-server, with the permissions of the
rest of the RPC infra. As a result, each sub-server is able to
independently specify what it needs w.r.t macaroon permissions and have
that taken care of by the RPC server. In order to achieve this, we need
to unify the creation of the RPC interceptors, and also fully manage the
gRPC server ourselves.
Some examples with various build configs:
```
⛰i make build
Building debug lnd and lncli.
go build -v -tags="dev" -o lnd-debug -ldflags "-X github.com/lightningnetwork/lnd/build.Commit=v0.5-beta-143-gb2069914c4b76109b7c59320dc48f8a5f30deb75-dirty" github.com/lightningnetwork/lnd
go build -v -tags="dev" -o lncli-debug -ldflags "-X github.com/lightningnetwork/lnd/build.Commit=v0.5-beta-143-gb2069914c4b76109b7c59320dc48f8a5f30deb75-dirty" github.com/lightningnetwork/lnd/cmd/lncli
⛰i ./lnd-debug --debuglevel=debug --signrpc.signermacaroonpath=~/sign.macaroon
unknown flag `signrpc.signermacaroonpath'
unknown flag `signrpc.signermacaroonpath'
⛰i make build tags=signerrpc
Building debug lnd and lncli.
go build -v -tags="dev signerrpc" -o lnd-debug -ldflags "-X github.com/lightningnetwork/lnd/build.Commit=v0.5-beta-143-gb2069914c4b76109b7c59320dc48f8a5f30deb75-dirty" github.com/lightningnetwork/lnd
go build -v -tags="dev signerrpc" -o lncli-debug -ldflags "-X github.com/lightningnetwork/lnd/build.Commit=v0.5-beta-143-gb2069914c4b76109b7c59320dc48f8a5f30deb75-dirty" github.com/lightningnetwork/lnd/cmd/lncli
⛰i ./lnd-debug --debuglevel=debug --signrpc.signermacaroonpath=~/sign.macaroon
2018-10-22 17:31:01.132 [INF] LTND: Version: 0.5.0-beta commit=v0.5-beta-143-gb2069914c4b76109b7c59320dc48f8a5f30deb75-dirty, build=development, logging=default
2018-10-22 17:31:01.133 [INF] LTND: Active chain: Bitcoin (network=simnet)
2018-10-22 17:31:01.140 [INF] CHDB: Checking for schema update: latest_version=6, db_version=6
2018-10-22 17:31:01.236 [INF] LTND: Primary chain is set to: bitcoin
2018-10-22 17:31:02.391 [INF] LNWL: Opened wallet
2018-10-22 17:31:03.315 [INF] LNWL: The wallet has been unlocked without a time limit
2018-10-22 17:31:03.315 [INF] LTND: LightningWallet opened
2018-10-22 17:31:03.319 [INF] LNWL: Catching up block hashes to height 3060, this will take a while...
2018-10-22 17:31:03.320 [INF] HSWC: Restoring in-memory circuit state from disk
2018-10-22 17:31:03.320 [INF] LNWL: Done catching up block hashes
2018-10-22 17:31:03.320 [INF] HSWC: Payment circuits loaded: num_pending=0, num_open=0
2018-10-22 17:31:03.322 [DBG] LTND: Populating dependencies for sub RPC server: Signrpc
```
As for the config, an example is:
```
[signrpc]
signrpc.signermacaroonpath=~/signer.macaroon
```
2018-10-23 04:03:07 +03:00
|
|
|
// After we've signalled all of our active goroutines to exit, we'll
|
|
|
|
// then do the same to signal a graceful shutdown of all the sub
|
|
|
|
// servers.
|
|
|
|
for _, subServer := range r.subServers {
|
|
|
|
rpcsLog.Infof("Stopping %v Sub-RPC Server",
|
|
|
|
subServer.Name())
|
|
|
|
|
|
|
|
if err := subServer.Stop(); err != nil {
|
2018-11-03 01:41:38 +03:00
|
|
|
rpcsLog.Errorf("unable to stop sub-server %v: %v",
|
|
|
|
subServer.Name(), err)
|
			continue
		}
	}

	// Finally, we can clean up all the listening sockets to ensure that
	// we give the file descriptors back to the OS.
	for _, cleanUp := range r.listenerCleanUp {
		cleanUp()
	}

	return nil
}

// addrPairsToOutputs converts a map describing a set of outputs to be
// created into the outputs themselves. The passed map pairs up an address
// with a desired output value amount. Each address is converted to its
// corresponding pkScript to be used within the constructed output(s).
func addrPairsToOutputs(addrPairs map[string]int64) ([]*wire.TxOut, error) {
	outputs := make([]*wire.TxOut, 0, len(addrPairs))
	for addr, amt := range addrPairs {
		addr, err := btcutil.DecodeAddress(addr, activeNetParams.Params)
		if err != nil {
			return nil, err
		}

		pkscript, err := txscript.PayToAddrScript(addr)
		if err != nil {
			return nil, err
		}

		outputs = append(outputs, wire.NewTxOut(amt, pkscript))
	}

	return outputs, nil
}
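
// A minimal usage sketch (not part of the original file); the bech32 address
// below is a hypothetical placeholder, and an invalid address for the active
// network simply surfaces as an error from DecodeAddress:
//
//	outputs, err := addrPairsToOutputs(map[string]int64{
//		"bcrt1qexampleplaceholder": 50000,
//	})
//	if err != nil {
//		// the address could not be decoded or mapped to a pkScript
//	}
//	_ = outputs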

// sendCoinsOnChain makes an on-chain transaction to send coins to one or
// more addresses specified in the passed payment map. The payment map maps
// an address to a specified output value to be sent to that address.
func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
	feeRate lnwallet.SatPerKWeight) (*chainhash.Hash, error) {

	outputs, err := addrPairsToOutputs(paymentMap)
	if err != nil {
		return nil, err
	}

	tx, err := r.server.cc.wallet.SendOutputs(outputs, feeRate)
	if err != nil {
		return nil, err
	}

	txHash := tx.TxHash()
	return &txHash, nil
}

// ListUnspent returns useful information about each unspent output owned by
// the wallet, as reported by the underlying `ListUnspentWitness`; the
// information returned is: outpoint, amount in satoshis, address, address
// type, scriptPubKey in hex and number of confirmations. The result is
// filtered to contain outputs whose number of confirmations is between a
// minimum and maximum number of confirmations specified by the user, with 0
// meaning unconfirmed.
func (r *rpcServer) ListUnspent(ctx context.Context,
	in *lnrpc.ListUnspentRequest) (*lnrpc.ListUnspentResponse, error) {

	minConfs := in.MinConfs
	maxConfs := in.MaxConfs

	switch {
	// Ensure that the user didn't attempt to specify a negative number of
	// confirmations, as that isn't possible.
	case minConfs < 0:
		return nil, fmt.Errorf("min confirmations must be >= 0")

	// We'll also ensure that the min number of confs is less than or
	// equal to the max number of confs for sanity.
	case minConfs > maxConfs:
		return nil, fmt.Errorf("max confirmations must be >= min " +
			"confirmations")
	}

	// With our arguments validated, we'll query the internal wallet for
	// the set of UTXOs that match our query.
	utxos, err := r.server.cc.wallet.ListUnspentWitness(minConfs, maxConfs)
	if err != nil {
		return nil, err
	}

	resp := &lnrpc.ListUnspentResponse{
		Utxos: make([]*lnrpc.Utxo, 0, len(utxos)),
	}
	for _, utxo := range utxos {
		// Translate the lnwallet address type to the proper gRPC
		// proto address type.
		var addrType lnrpc.AddressType
		switch utxo.AddressType {

		case lnwallet.WitnessPubKey:
			addrType = lnrpc.AddressType_WITNESS_PUBKEY_HASH

		case lnwallet.NestedWitnessPubKey:
			addrType = lnrpc.AddressType_NESTED_PUBKEY_HASH

		case lnwallet.UnknownAddressType:
			rpcsLog.Warnf("[listunspent] utxo with address of "+
				"unknown type ignored: %v",
				utxo.OutPoint.String())
			continue

		default:
			return nil, fmt.Errorf("invalid utxo address type")
		}

		// Now that we know we have a proper mapping to an address,
		// we'll convert the regular outpoint to an lnrpc variant.
		outpoint := &lnrpc.OutPoint{
			TxidBytes:   utxo.OutPoint.Hash[:],
			TxidStr:     utxo.OutPoint.Hash.String(),
			OutputIndex: utxo.OutPoint.Index,
		}

		utxoResp := lnrpc.Utxo{
			Type:          addrType,
			AmountSat:     int64(utxo.Value),
			PkScript:      hex.EncodeToString(utxo.PkScript),
			Outpoint:      outpoint,
			Confirmations: utxo.Confirmations,
		}

		// Finally, we'll attempt to extract the raw address from the
		// script so we can display a human friendly address to the
		// end user.
		_, outAddresses, _, err := txscript.ExtractPkScriptAddrs(
			utxo.PkScript, activeNetParams.Params,
		)
		if err != nil {
			return nil, err
		}

		// If we can't properly locate a single address, then this was
		// an error in our mapping, and we'll return an error back to
		// the user.
		if len(outAddresses) != 1 {
			return nil, fmt.Errorf("an output was unexpectedly " +
				"multisig")
		}

		utxoResp.Address = outAddresses[0].String()

		resp.Utxos = append(resp.Utxos, &utxoResp)
	}

	maxStr := ""
	if maxConfs != math.MaxInt32 {
		maxStr = " max=" + fmt.Sprintf("%d", maxConfs)
	}

	rpcsLog.Debugf("[listunspent] min=%v%v, generated utxos: %v", minConfs,
		maxStr, utxos)

	return resp, nil
}
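
// A sketch of a hypothetical gRPC client call against this endpoint (client
// setup omitted); MinConfs of 0 includes unconfirmed outputs, and passing
// MaxConfs as math.MaxInt32 effectively removes the upper bound:
//
//	resp, err := client.ListUnspent(ctx, &lnrpc.ListUnspentRequest{
//		MinConfs: 0,
//		MaxConfs: math.MaxInt32,
//	})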

// EstimateFee handles a request for estimating the fee for sending a
// transaction spending to multiple specified outputs in parallel.
func (r *rpcServer) EstimateFee(ctx context.Context,
	in *lnrpc.EstimateFeeRequest) (*lnrpc.EstimateFeeResponse, error) {

	// Create the list of outputs we are spending to.
	outputs, err := addrPairsToOutputs(in.AddrToAmount)
	if err != nil {
		return nil, err
	}

	// Query the fee estimator for the fee rate for the given confirmation
	// target.
	target := in.TargetConf
	feePerKw, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(target),
		},
	)
	if err != nil {
		return nil, err
	}

	// We will ask the wallet to create a tx using this fee rate. We set
	// dryRun=true to avoid inflating the change addresses in the db.
	var tx *txauthor.AuthoredTx
	wallet := r.server.cc.wallet
	err = wallet.WithCoinSelectLock(func() error {
		tx, err = wallet.CreateSimpleTx(outputs, feePerKw, true)
		return err
	})
	if err != nil {
		return nil, err
	}

	// Use the created tx to calculate the total fee.
	totalOutput := int64(0)
	for _, out := range tx.Tx.TxOut {
		totalOutput += out.Value
	}
	totalFee := int64(tx.TotalInput) - totalOutput

	resp := &lnrpc.EstimateFeeResponse{
		FeeSat:            totalFee,
		FeerateSatPerByte: int64(feePerKw.FeePerKVByte() / 1000),
	}

	rpcsLog.Debugf("[estimatefee] fee estimate for conf target %d: %v",
		target, resp)

	return resp, nil
}
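
// Unit note (a sketch derived from the conversions used in this file): lnd
// tracks fee rates internally in sat/kw, while the RPC surface speaks
// sat/vbyte. With 4 weight units per vbyte, the two directions are:
//
//	// sat/vbyte -> sat/kw, as done at the DetermineFeePerKw call sites
//	feePerKw := lnwallet.SatPerKVByte(10 * 1000).FeePerKWeight() // 2500 sat/kw
//
//	// sat/kw -> sat/vbyte, as done for the response above
//	satPerByte := int64(feePerKw.FeePerKVByte() / 1000) // 10 sat/vbyte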

// SendCoins executes a request to send coins to a particular address. Unlike
// SendMany, this RPC call only allows creating a single output at a time.
func (r *rpcServer) SendCoins(ctx context.Context,
	in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for this transaction.
	satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
	feePerKw, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(in.TargetConf),
			FeeRate:    satPerKw,
		},
	)
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendcoins] addr=%v, amt=%v, sat/kw=%v, sweep_all=%v",
		in.Addr, btcutil.Amount(in.Amount), int64(feePerKw),
		in.SendAll)

	// Decode the address receiving the coins; we need to check whether
	// the address is valid for this network.
	targetAddr, err := btcutil.DecodeAddress(in.Addr, activeNetParams.Params)
	if err != nil {
		return nil, err
	}

	// Make the check on the decoded address according to the active
	// network.
	if !targetAddr.IsForNet(activeNetParams.Params) {
		return nil, fmt.Errorf("address: %v is not valid for this "+
			"network: %v", targetAddr.String(),
			activeNetParams.Params.Name)
	}

	// If the destination address parses to a valid pubkey, we assume the
	// user accidentally tried to send funds to a bare pubkey address.
	// This check is here to prevent unintended transfers.
	decodedAddr, _ := hex.DecodeString(in.Addr)
	_, err = btcec.ParsePubKey(decodedAddr, btcec.S256())
	if err == nil {
		return nil, fmt.Errorf("cannot send coins to pubkeys")
	}

	var txid *chainhash.Hash

	wallet := r.server.cc.wallet

	// If the send all flag is active, then we'll attempt to sweep all the
	// coins in the wallet in a single transaction (if possible),
	// otherwise, we'll respect the amount, and attempt a regular 2-output
	// send.
	if in.SendAll {
		// At this point, the amount shouldn't be set since we've been
		// instructed to sweep all the coins from the wallet.
		if in.Amount != 0 {
			return nil, fmt.Errorf("amount set while SendAll is " +
				"active")
		}

		_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
		if err != nil {
			return nil, err
		}

		// With the sweeper instance created, we can now generate a
		// transaction that will sweep ALL outputs from the wallet in
		// a single transaction. This will be generated in a
		// concurrency-safe manner, so no need to worry about locking.
		sweepTxPkg, err := sweep.CraftSweepAllTx(
			feePerKw, uint32(bestHeight), targetAddr, wallet,
			wallet.WalletController, wallet.WalletController,
			r.server.cc.feeEstimator, r.server.cc.signer,
		)
		if err != nil {
			return nil, err
		}

		rpcsLog.Debugf("Sweeping all coins from wallet to addr=%v, "+
			"with tx=%v", in.Addr, spew.Sdump(sweepTxPkg.SweepTx))

		// As our sweep transaction was created successfully, we'll
		// now attempt to publish it, cancelling the sweep pkg to
		// return all outputs if it fails.
		err = wallet.PublishTransaction(sweepTxPkg.SweepTx)
		if err != nil {
			sweepTxPkg.CancelSweepAttempt()

			return nil, fmt.Errorf("unable to broadcast sweep "+
				"transaction: %v", err)
		}

		sweepTXID := sweepTxPkg.SweepTx.TxHash()
		txid = &sweepTXID
	} else {

		// We'll now construct our payment map, and use the wallet's
		// coin selection synchronization method to ensure that no
		// coin selection (funding, sweep alls, other sends) can
		// proceed while we instruct the wallet to send this
		// transaction.
		paymentMap := map[string]int64{targetAddr.String(): in.Amount}
		err := wallet.WithCoinSelectLock(func() error {
			newTXID, err := r.sendCoinsOnChain(paymentMap, feePerKw)
			if err != nil {
				return err
			}

			txid = newTXID

			return nil
		})
		if err != nil {
			return nil, err
		}
	}

	rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())

	return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
}
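
// A hypothetical client-side sketch of the two modes handled above; the
// address is a placeholder:
//
//	// Regular send of a fixed amount.
//	resp, err := client.SendCoins(ctx, &lnrpc.SendCoinsRequest{
//		Addr:       "bcrt1qexampleplaceholder",
//		Amount:     100000,
//		TargetConf: 6,
//	})
//
//	// Sweep every coin in the wallet; Amount must be left at zero.
//	resp, err = client.SendCoins(ctx, &lnrpc.SendCoinsRequest{
//		Addr:    "bcrt1qexampleplaceholder",
//		SendAll: true,
//	})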

// SendMany handles a request for a transaction that creates multiple
// specified outputs in parallel.
func (r *rpcServer) SendMany(ctx context.Context,
	in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for this transaction.
	satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
	feePerKw, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(in.TargetConf),
			FeeRate:    satPerKw,
		},
	)
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendmany] outputs=%v, sat/kw=%v",
		spew.Sdump(in.AddrToAmount), int64(feePerKw))

	var txid *chainhash.Hash

	// We'll attempt to send to the target set of outputs, ensuring that
	// we synchronize with any other ongoing coin selection attempts which
	// happen to also be concurrently executing.
	wallet := r.server.cc.wallet
	err = wallet.WithCoinSelectLock(func() error {
		sendManyTXID, err := r.sendCoinsOnChain(
			in.AddrToAmount, feePerKw,
		)
		if err != nil {
			return err
		}

		txid = sendManyTXID

		return nil
	})
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())

	return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
}
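
// A hypothetical client-side sketch: SendMany batches several outputs into a
// single transaction, so the map below funds two placeholder addresses at
// once:
//
//	resp, err := client.SendMany(ctx, &lnrpc.SendManyRequest{
//		AddrToAmount: map[string]int64{
//			"bcrt1qplaceholderone": 50000,
//			"bcrt1qplaceholdertwo": 75000,
//		},
//		TargetConf: 6,
//	})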

// NewAddress creates a new address under control of the local wallet.
func (r *rpcServer) NewAddress(ctx context.Context,
	in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {

	// Translate the gRPC proto address type to the wallet controller's
	// available address types.
	var (
		addr btcutil.Address
		err  error
	)
	switch in.Type {
	case lnrpc.AddressType_WITNESS_PUBKEY_HASH:
		addr, err = r.server.cc.wallet.NewAddress(
			lnwallet.WitnessPubKey, false,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_NESTED_PUBKEY_HASH:
		addr, err = r.server.cc.wallet.NewAddress(
			lnwallet.NestedWitnessPubKey, false,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_UNUSED_WITNESS_PUBKEY_HASH:
		addr, err = r.server.cc.wallet.LastUnusedAddress(
			lnwallet.WitnessPubKey,
		)
		if err != nil {
			return nil, err
		}

	case lnrpc.AddressType_UNUSED_NESTED_PUBKEY_HASH:
		addr, err = r.server.cc.wallet.LastUnusedAddress(
			lnwallet.NestedWitnessPubKey,
		)
		if err != nil {
			return nil, err
		}
	}

	rpcsLog.Debugf("[newaddress] type=%v addr=%v", in.Type, addr.String())
	return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
}
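
// For reference, the proto-to-wallet mapping implemented by the switch above:
//
//	WITNESS_PUBKEY_HASH        -> NewAddress(lnwallet.WitnessPubKey)
//	NESTED_PUBKEY_HASH         -> NewAddress(lnwallet.NestedWitnessPubKey)
//	UNUSED_WITNESS_PUBKEY_HASH -> LastUnusedAddress(lnwallet.WitnessPubKey)
//	UNUSED_NESTED_PUBKEY_HASH  -> LastUnusedAddress(lnwallet.NestedWitnessPubKey)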

var (
	// signedMsgPrefix is a special prefix that we'll prepend to any
	// messages we sign/verify. We do this to ensure that we don't
	// accidentally sign a sighash, or other sensitive material. By
	// prepending this fragment, we bind message signing to our particular
	// context.
	signedMsgPrefix = []byte("Lightning Signed Message:")
)
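
// Sketch of the digest actually signed and verified below: the prefix is
// prepended first, and the compact signature commits to the double-SHA256 of
// the combined bytes:
//
//	digest := chainhash.DoubleHashB(append(signedMsgPrefix, msg...))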

// SignMessage signs a message with the resident node's private key. The
// returned signature string is zbase32 encoded and pubkey recoverable,
// meaning that only the message digest and signature are needed for
// verification.
func (r *rpcServer) SignMessage(ctx context.Context,
	in *lnrpc.SignMessageRequest) (*lnrpc.SignMessageResponse, error) {

	if in.Msg == nil {
		return nil, fmt.Errorf("need a message to sign")
	}

	in.Msg = append(signedMsgPrefix, in.Msg...)
	sigBytes, err := r.server.nodeSigner.SignCompact(in.Msg)
	if err != nil {
		return nil, err
	}

	sig := zbase32.EncodeToString(sigBytes)
	return &lnrpc.SignMessageResponse{Signature: sig}, nil
}
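
// A round-trip sketch against this pair of RPCs, assuming a stock lncli
// build exposing the matching commands; the signature argument is a
// placeholder for the zbase32 string returned by the first call:
//
//	$ lncli signmessage "test message"
//	$ lncli verifymessage "test message" <zbase32 signature>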

// VerifyMessage verifies a signature over a msg. The signature must be
// zbase32 encoded and signed by an active node in the resident node's
// channel database. In addition to returning the validity of the signature,
// VerifyMessage also returns the recovered pubkey from the signature.
func (r *rpcServer) VerifyMessage(ctx context.Context,
	in *lnrpc.VerifyMessageRequest) (*lnrpc.VerifyMessageResponse, error) {

	if in.Msg == nil {
		return nil, fmt.Errorf("need a message to verify")
	}

	// The signature should be zbase32 encoded.
	sig, err := zbase32.DecodeString(in.Signature)
	if err != nil {
		return nil, fmt.Errorf("failed to decode signature: %v", err)
	}

	// The signature is over the double-sha256 hash of the message.
	in.Msg = append(signedMsgPrefix, in.Msg...)
	digest := chainhash.DoubleHashB(in.Msg)

	// RecoverCompact both recovers the pubkey and validates the
	// signature.
	pubKey, _, err := btcec.RecoverCompact(btcec.S256(), sig, digest)
	if err != nil {
		return &lnrpc.VerifyMessageResponse{Valid: false}, nil
	}
	pubKeyHex := hex.EncodeToString(pubKey.SerializeCompressed())

	var pub [33]byte
	copy(pub[:], pubKey.SerializeCompressed())

	// Query the channel graph to ensure a node in the network with active
	// channels signed the message.
	//
	// TODO(phlip9): Require valid nodes to have capital in active channels.
	graph := r.server.chanDB.ChannelGraph()
	_, active, err := graph.HasLightningNode(pub)
	if err != nil {
		return nil, fmt.Errorf("failed to query graph: %v", err)
	}

	return &lnrpc.VerifyMessageResponse{
		Valid:  active,
		Pubkey: pubKeyHex,
	}, nil
}

// ConnectPeer attempts to establish a connection to a remote peer.
func (r *rpcServer) ConnectPeer(ctx context.Context,
	in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {

	// If the server hasn't yet started, it won't be able to service any
	// of our requests, so we'll bail early here.
	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	if in.Addr == nil {
		return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
	}

	pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
	if err != nil {
		return nil, err
	}
	pubKey, err := btcec.ParsePubKey(pubkeyHex, btcec.S256())
	if err != nil {
		return nil, err
	}

	// Connections to ourselves are disallowed for obvious reasons.
	if pubKey.IsEqual(r.server.identityPriv.PubKey()) {
		return nil, fmt.Errorf("cannot make connection to self")
	}

	addr, err := parseAddr(in.Addr.Host)
	if err != nil {
		return nil, err
	}

	peerAddr := &lnwire.NetAddress{
		IdentityKey: pubKey,
		Address:     addr,
		ChainNet:    activeNetParams.Net,
	}

	rpcsLog.Debugf("[connectpeer] requested connection to %x@%s",
		peerAddr.IdentityKey.SerializeCompressed(), peerAddr.Address)

	if err := r.server.ConnectToPeer(peerAddr, in.Perm); err != nil {
		rpcsLog.Errorf("[connectpeer]: error connecting to peer: %v", err)
		return nil, err
	}

	rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
	return &lnrpc.ConnectPeerResponse{}, nil
}

// DisconnectPeer attempts to disconnect one peer from another identified by
// a given pubKey. In the case that we currently have a pending or active
// channel with the target peer, this action will be disallowed.
func (r *rpcServer) DisconnectPeer(ctx context.Context,
	in *lnrpc.DisconnectPeerRequest) (*lnrpc.DisconnectPeerResponse, error) {

	rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey)

	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// First we'll validate the string passed in within the request to
	// ensure that it's a valid hex-string, and also a valid compressed
	// public key.
	pubKeyBytes, err := hex.DecodeString(in.PubKey)
	if err != nil {
		return nil, fmt.Errorf("unable to decode pubkey bytes: %v", err)
	}
	peerPubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
	if err != nil {
		return nil, fmt.Errorf("unable to parse pubkey: %v", err)
	}

	// Next, we'll fetch the pending/active channels we have with a
	// particular peer.
	nodeChannels, err := r.server.chanDB.FetchOpenChannels(peerPubKey)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch channels for peer: %v", err)
	}

	// In order to avoid erroneously disconnecting from a peer that we
	// have an active channel with, if we have any channels active with
	// this peer, then we'll disallow disconnecting from them.
	if len(nodeChannels) > 0 && !cfg.UnsafeDisconnect {
		return nil, fmt.Errorf("cannot disconnect from peer(%x), "+
			"all active channels with the peer need to be closed "+
			"first", pubKeyBytes)
	}

	// With all initial validation complete, we'll now request that the
	// server disconnects from the peer.
	if err := r.server.DisconnectPeer(peerPubKey); err != nil {
		return nil, fmt.Errorf("unable to disconnect peer: %v", err)
	}

	return &lnrpc.DisconnectPeerResponse{}, nil
}

// extractOpenChannelMinConfs extracts the minimum number of confirmations
// from the OpenChannelRequest that each output used to fund the channel's
// funding transaction should satisfy.
func extractOpenChannelMinConfs(in *lnrpc.OpenChannelRequest) (int32, error) {
	switch {
	// Ensure that the MinConfs parameter is non-negative.
	case in.MinConfs < 0:
		return 0, errors.New("minimum number of confirmations must " +
			"be a non-negative number")

	// The funding transaction should not be funded with unconfirmed
	// outputs unless explicitly specified by SpendUnconfirmed. We do this
	// to provide sane defaults to the OpenChannel RPC, as otherwise, if
	// the MinConfs field isn't explicitly set by the caller, we'll use
	// unconfirmed outputs without the caller being aware.
	case in.MinConfs == 0 && !in.SpendUnconfirmed:
		return 1, nil

	// In the event that the caller set MinConfs > 0 and SpendUnconfirmed
	// to true, we'll return an error to indicate the conflict.
	case in.MinConfs > 0 && in.SpendUnconfirmed:
		return 0, errors.New("SpendUnconfirmed set to true with " +
			"MinConfs > 0")

	// The funding transaction of the new channel to be created can be
	// funded with unconfirmed outputs.
	case in.SpendUnconfirmed:
		return 0, nil

	// If none of the above cases matched, we'll return the value set
	// explicitly by the caller.
	default:
		return in.MinConfs, nil
	}
}
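
// Condensed decision table for the switch above:
//
//	MinConfs < 0                        -> error
//	MinConfs == 0 && !SpendUnconfirmed  -> 1 (safe default)
//	MinConfs > 0  && SpendUnconfirmed   -> error (conflicting request)
//	SpendUnconfirmed                    -> 0 (unconfirmed inputs allowed)
//	otherwise                           -> MinConfs as given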

// OpenChannel attempts to open a singly funded channel specified in the
// request to a remote peer.
func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
	updateStream lnrpc.Lightning_OpenChannelServer) error {

	rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+
		"allocation(us=%v, them=%v)", in.NodePubkeyString,
		in.LocalFundingAmount, in.PushSat)

	if !r.server.Started() {
		return fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
	remoteInitialBalance := btcutil.Amount(in.PushSat)
	minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat)
	remoteCsvDelay := uint16(in.RemoteCsvDelay)

	// Ensure that the initial balance of the remote party (if pushing
	// satoshis) does not exceed the amount the local party has requested
	// for funding.
	//
	// TODO(roasbeef): incorporate base fee?
	if remoteInitialBalance >= localFundingAmt {
		return fmt.Errorf("amount pushed to remote peer for initial " +
			"state must be below the local funding amount")
	}

	// Ensure that the user doesn't exceed the current soft-limit for
	// channel size. If the funding amount is above the soft-limit, then
	// we'll reject the request.
	if localFundingAmt > MaxFundingAmount {
		return fmt.Errorf("funding amount is too large, the max "+
			"channel size is: %v", MaxFundingAmount)
	}

	// Restrict the size of the channel we'll actually open. At a later
	// level, we'll ensure that the output we create, after accounting
	// for fees, isn't a dust output.
	if localFundingAmt < minChanFundingSize {
		return fmt.Errorf("channel is too small, the minimum channel "+
			"size is: %v SAT", int64(minChanFundingSize))
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the channel's funding transaction should
	// satisfy.
	minConfs, err := extractOpenChannelMinConfs(in)
	if err != nil {
		return err
	}

	var (
		nodePubKey      *btcec.PublicKey
		nodePubKeyBytes []byte
	)

	// TODO(roasbeef): also return channel ID?

	// Ensure that the NodePubKey is set before attempting to use it.
	if len(in.NodePubkey) == 0 {
		return fmt.Errorf("NodePubKey is not set")
	}

	// Parse the raw bytes of the node key into a pubkey object so we can
	// easily manipulate it.
	nodePubKey, err = btcec.ParsePubKey(in.NodePubkey, btcec.S256())
	if err != nil {
		return err
	}

	// Making a channel to ourselves wouldn't be of any use, so we
	// explicitly disallow them.
	if nodePubKey.IsEqual(r.server.identityPriv.PubKey()) {
		return fmt.Errorf("cannot open channel to self")
	}

	nodePubKeyBytes = nodePubKey.SerializeCompressed()

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for the funding transaction.
	satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
	feeRate, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(in.TargetConf),
			FeeRate:    satPerKw,
		},
	)
	if err != nil {
		return err
	}

	rpcsLog.Debugf("[openchannel]: using fee of %v sat/kw for funding tx",
		int64(feeRate))

	// Instruct the server to trigger the necessary events to attempt to
	// open a new channel. A stream is returned in place; this stream will
	// be used to consume updates of the state of the pending channel.
	req := &openChanReq{
		targetPubkey:    nodePubKey,
		chainHash:       *activeNetParams.GenesisHash,
		localFundingAmt: localFundingAmt,
		pushAmt:         lnwire.NewMSatFromSatoshis(remoteInitialBalance),
		minHtlc:         minHtlc,
		fundingFeePerKw: feeRate,
		private:         in.Private,
		remoteCsvDelay:  remoteCsvDelay,
		minConfs:        minConfs,
	}

	updateChan, errChan := r.server.OpenChannel(req)

	var outpoint wire.OutPoint
out:
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
				nodePubKeyBytes, err)
			return err
		case fundingUpdate := <-updateChan:
			rpcsLog.Tracef("[openchannel] sending update: %v",
				fundingUpdate)
			if err := updateStream.Send(fundingUpdate); err != nil {
				return err
			}

			// If a final channel open update is being sent, then
			// we can break out of our recv loop as we no longer
			// need to process any further updates.
			switch update := fundingUpdate.Update.(type) {
			case *lnrpc.OpenStatusUpdate_ChanOpen:
				chanPoint := update.ChanOpen.ChannelPoint
				txid, err := GetChanPointFundingTxid(chanPoint)
				if err != nil {
					return err
				}
				outpoint = wire.OutPoint{
					Hash:  *txid,
					Index: chanPoint.OutputIndex,
				}

				break out
			}
		case <-r.quit:
			return nil
		}
	}

	rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)",
		nodePubKeyBytes, outpoint)
	return nil
}

// OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
// call is meant to be consumed by clients to the REST proxy. As with all
// other sync calls, all byte slices are instead to be populated as hex
// encoded strings.
func (r *rpcServer) OpenChannelSync(ctx context.Context,
	in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {

	rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+
		"allocation(us=%v, them=%v)", in.NodePubkeyString,
		in.LocalFundingAmount, in.PushSat)

	// We don't allow new channels to be open while the server is still
	// syncing, as otherwise we may not be able to obtain the relevant
	// notifications.
	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// Creation of channels before the wallet syncs up is currently
	// disallowed.
	isSynced, _, err := r.server.cc.wallet.IsSynced()
	if err != nil {
		return nil, err
	}
	if !isSynced {
		return nil, errors.New("channels cannot be created before the " +
			"wallet is fully synced")
	}

	// Decode the provided target node's public key, parsing it into a
	// pub key object. For all sync calls, byte slices are expected to be
	// encoded as hex strings.
	keyBytes, err := hex.DecodeString(in.NodePubkeyString)
	if err != nil {
		return nil, err
	}
	nodepubKey, err := btcec.ParsePubKey(keyBytes, btcec.S256())
	if err != nil {
		return nil, err
	}

	localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
	remoteInitialBalance := btcutil.Amount(in.PushSat)
	minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat)
	remoteCsvDelay := uint16(in.RemoteCsvDelay)

	// Ensure that the initial balance of the remote party (if pushing
	// satoshis) does not exceed the amount the local party has requested
	// for funding.
	if remoteInitialBalance >= localFundingAmt {
		return nil, fmt.Errorf("amount pushed to remote peer for " +
			"initial state must be below the local funding amount")
	}

	// Restrict the size of the channel we'll actually open. At a later
	// level, we'll ensure that the output we create, after accounting
	// for fees, isn't a dust output.
	if localFundingAmt < minChanFundingSize {
		return nil, fmt.Errorf("channel is too small, the minimum channel "+
			"size is: %v SAT", int64(minChanFundingSize))
	}

	// Then, we'll extract the minimum number of confirmations that each
	// output we use to fund the channel's funding transaction should
	// satisfy.
	minConfs, err := extractOpenChannelMinConfs(in)
	if err != nil {
		return nil, err
	}

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for the funding transaction.
	satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight()
	feeRate, err := sweep.DetermineFeePerKw(
		r.server.cc.feeEstimator, sweep.FeePreference{
			ConfTarget: uint32(in.TargetConf),
			FeeRate:    satPerKw,
		},
	)
	if err != nil {
		return nil, err
	}

	rpcsLog.Tracef("[openchannel] target sat/kw for funding tx: %v",
		int64(feeRate))

	req := &openChanReq{
		targetPubkey:    nodepubKey,
		chainHash:       *activeNetParams.GenesisHash,
		localFundingAmt: localFundingAmt,
		pushAmt:         lnwire.NewMSatFromSatoshis(remoteInitialBalance),
		minHtlc:         minHtlc,
		fundingFeePerKw: feeRate,
		private:         in.Private,
		remoteCsvDelay:  remoteCsvDelay,
		minConfs:        minConfs,
	}

	updateChan, errChan := r.server.OpenChannel(req)
	select {
	// If an error occurs then immediately return the error to the client.
	case err := <-errChan:
		rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
			nodepubKey, err)
		return nil, err

	// Otherwise, wait for the first channel update. The first update sent
	// is when the funding transaction is broadcast to the network.
	case fundingUpdate := <-updateChan:
		rpcsLog.Tracef("[openchannel] sending update: %v",
			fundingUpdate)

		// Parse out the txid of the pending funding transaction. The
		// sync client can use this to poll against the list of
		// PendingChannels.
		openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
		chanUpdate := openUpdate.ChanPending

		return &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
				FundingTxidBytes: chanUpdate.Txid,
			},
			OutputIndex: chanUpdate.OutputIndex,
		}, nil
	case <-r.quit:
		return nil, nil
	}
}

// GetChanPointFundingTxid returns the given channel point's funding txid in
// raw bytes.
func GetChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, error) {
	var txid []byte

	// A channel point's funding txid can be get/set as a byte slice or a
	// string. In the case it is a string, decode it.
	switch chanPoint.GetFundingTxid().(type) {
	case *lnrpc.ChannelPoint_FundingTxidBytes:
		txid = chanPoint.GetFundingTxidBytes()
	case *lnrpc.ChannelPoint_FundingTxidStr:
		s := chanPoint.GetFundingTxidStr()
		h, err := chainhash.NewHashFromStr(s)
		if err != nil {
			return nil, err
		}

		txid = h[:]
	}

	return chainhash.NewHash(txid)
}
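
// A usage sketch covering the string variant of the oneof; the txid string
// is a placeholder:
//
//	txid, err := GetChanPointFundingTxid(&lnrpc.ChannelPoint{
//		FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
//			FundingTxidStr: "<hex-encoded txid>",
//		},
//	})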

// CloseChannel attempts to close an active channel identified by its channel
// point. The actions of this method can additionally be augmented to attempt
// a force close after a timeout period in the case of an inactive peer.
func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
	updateStream lnrpc.Lightning_CloseChannelServer) error {

	// If the user didn't specify a channel point, then we'll reject this
	// request all together.
	if in.GetChannelPoint() == nil {
		return fmt.Errorf("must specify channel point in close channel")
	}

	// If force closing a channel, the fee set in the commitment
	// transaction is used.
	if in.Force && (in.SatPerByte != 0 || in.TargetConf != 0) {
		return fmt.Errorf("force closing a channel uses a pre-defined fee")
	}

	force := in.Force
	index := in.ChannelPoint.OutputIndex
	txid, err := GetChanPointFundingTxid(in.GetChannelPoint())
	if err != nil {
		rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
		return err
	}
	chanPoint := wire.NewOutPoint(txid, index)

	rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
		chanPoint, force)

	var (
		updateChan chan interface{}
		errChan    chan error
	)

	// TODO(roasbeef): if force and peer online then don't force?

	// First, we'll fetch the channel as is, as we'll need to examine it
	// regardless of if this is a force close or not.
	channel, err := r.fetchActiveChannel(*chanPoint)
	if err != nil {
		return err
	}

	// If a force closure was requested, then we'll handle all the details
	// around the creation and broadcast of the unilateral closure
	// transaction here rather than going to the switch as we don't
	// require interaction from the peer.
	if force {
		_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
		if err != nil {
			return err
		}

		// As we're force closing this channel, as a precaution, we'll
		// ensure that the switch doesn't continue to see this channel
		// as eligible for forwarding HTLC's. If the peer is online,
		// then we'll also purge all of its indexes.
		remotePub := &channel.StateSnapshot().RemoteIdentity
		if peer, err := r.server.FindPeer(remotePub); err == nil {
			// TODO(roasbeef): actually get the active channel
			// instead too?
			//  * so only need to grab from database
			peer.WipeChannel(channel.ChannelPoint())
		} else {
			chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())
			r.server.htlcSwitch.RemoveLink(chanID)
		}

		// With the necessary indexes cleaned up, we'll now force
		// close the channel.
		chainArbitrator := r.server.chainArb
		closingTx, err := chainArbitrator.ForceCloseContract(
			*chanPoint,
		)
		if err != nil {
			rpcsLog.Errorf("unable to force close transaction: %v", err)
			return err
		}

		closingTxid := closingTx.TxHash()

		// With the transaction broadcast, we send our first update to
		// the client.
		updateChan = make(chan interface{}, 2)
		updateChan <- &pendingUpdate{
			Txid: closingTxid[:],
		}

		errChan = make(chan error, 1)
		notifier := r.server.cc.chainNotifier
		go waitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint,
			&closingTxid, closingTx.TxOut[0].PkScript, func() {
				// Respond to the local subsystem which
				// requested the channel closure.
				updateChan <- &channelCloseUpdate{
					ClosingTxid: closingTxid[:],
					Success:     true,
				}
			})
	} else {
		// If the link is not known by the switch, we cannot
		// gracefully close the channel.
		channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
		if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil {
			rpcsLog.Debugf("Trying to non-force close offline channel with "+
				"chan_point=%v", chanPoint)
			return fmt.Errorf("unable to gracefully close channel while peer "+
				"is offline (try force closing it instead): %v", err)
		}

		// Based on the passed fee related parameters, we'll determine
		// an appropriate fee rate for the cooperative closure
		// transaction.
		satPerKw := lnwallet.SatPerKVByte(
			in.SatPerByte * 1000,
		).FeePerKWeight()
		feeRate, err := sweep.DetermineFeePerKw(
			r.server.cc.feeEstimator, sweep.FeePreference{
				ConfTarget: uint32(in.TargetConf),
				FeeRate:    satPerKw,
			},
		)
		if err != nil {
			return err
		}

		rpcsLog.Debugf("Target sat/kw for closing transaction: %v",
			int64(feeRate))

		// Before we attempt the cooperative channel closure, we'll
		// examine the channel to ensure that it doesn't have a
		// lingering HTLC.
		if len(channel.ActiveHtlcs()) != 0 {
			return fmt.Errorf("cannot co-op close channel " +
				"with active htlcs")
		}

		// Otherwise, the caller has requested a regular interactive
		// cooperative channel closure. So we'll forward the request
		// to the htlc switch which will handle the negotiation and
		// broadcast details.
		updateChan, errChan = r.server.htlcSwitch.CloseLink(
			chanPoint, htlcswitch.CloseRegular, feeRate,
		)
	}
out:
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("[closechannel] unable to close "+
				"ChannelPoint(%v): %v", chanPoint, err)
			return err
		case closingUpdate := <-updateChan:
			rpcClosingUpdate, err := createRPCCloseUpdate(
				closingUpdate,
			)
			if err != nil {
				return err
			}

			rpcsLog.Tracef("[closechannel] sending update: %v",
				rpcClosingUpdate)

			if err := updateStream.Send(rpcClosingUpdate); err != nil {
				return err
			}

			// If a final channel closing update is being sent,
			// then we can break out of our dispatch loop as we no
			// longer need to process any further updates.
			switch closeUpdate := closingUpdate.(type) {
			case *channelCloseUpdate:
				h, _ := chainhash.NewHash(closeUpdate.ClosingTxid)
				rpcsLog.Infof("[closechannel] close completed: "+
					"txid(%v)", h)
				break out
			}
		case <-r.quit:
			return nil
		}
	}

	return nil
}

func createRPCCloseUpdate(update interface{}) (
	*lnrpc.CloseStatusUpdate, error) {

	switch u := update.(type) {
	case *channelCloseUpdate:
		return &lnrpc.CloseStatusUpdate{
			Update: &lnrpc.CloseStatusUpdate_ChanClose{
				ChanClose: &lnrpc.ChannelCloseUpdate{
					ClosingTxid: u.ClosingTxid,
				},
			},
		}, nil
	case *pendingUpdate:
		return &lnrpc.CloseStatusUpdate{
			Update: &lnrpc.CloseStatusUpdate_ClosePending{
				ClosePending: &lnrpc.PendingUpdate{
					Txid:        u.Txid,
					OutputIndex: u.OutputIndex,
				},
			},
		}, nil
	}

	return nil, errors.New("unknown close status update")
}

// AbandonChannel removes all channel state from the database except for a
// close summary. This method can be used to get rid of permanently unusable
// channels due to bugs fixed in newer versions of lnd.
func (r *rpcServer) AbandonChannel(ctx context.Context,
	in *lnrpc.AbandonChannelRequest) (*lnrpc.AbandonChannelResponse, error) {

	// If this isn't the dev build, then we won't allow the RPC to be
	// executed, as it's an advanced feature and won't be activated in
	// regular production/release builds.
	if !build.IsDevBuild() {
		return nil, fmt.Errorf("AbandonChannel RPC call only " +
			"available in dev builds")
	}

	// We'll parse out the arguments so we can obtain the chanPoint of the
	// target channel.
	txid, err := GetChanPointFundingTxid(in.GetChannelPoint())
	if err != nil {
		return nil, err
	}
	index := in.ChannelPoint.OutputIndex
	chanPoint := wire.NewOutPoint(txid, index)

	// With the chanPoint constructed, we'll attempt to find the target
	// channel in the database. If we can't find the channel, then we'll
	// return the error back to the caller.
	dbChan, err := r.server.chanDB.FetchChannel(*chanPoint)
	if err != nil {
		return nil, err
	}

	// Now that we've found the channel, we'll populate a close summary
	// for the channel, so we can store as much information for this
	// abandoned channel as possible. We also ensure that we set Pending
	// to false, to indicate that this channel has been "fully" closed.
	_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, err
	}
	summary := &channeldb.ChannelCloseSummary{
		CloseType:               channeldb.Abandoned,
		ChanPoint:               *chanPoint,
		ChainHash:               dbChan.ChainHash,
		CloseHeight:             uint32(bestHeight),
		RemotePub:               dbChan.IdentityPub,
		Capacity:                dbChan.Capacity,
		SettledBalance:          dbChan.LocalCommitment.LocalBalance.ToSatoshis(),
		ShortChanID:             dbChan.ShortChanID(),
		RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation,
		RemoteNextRevocation:    dbChan.RemoteNextRevocation,
		LocalChanConfig:         dbChan.LocalChanCfg,
	}

	// Finally, we'll close the channel in the DB, and return back to the
	// caller.
	err = dbChan.CloseChannel(summary)
	if err != nil {
		return nil, err
	}

	return &lnrpc.AbandonChannelResponse{}, nil
}
|
|
|
|
|
|
|
|
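// For illustration only, a minimal sketch of an AbandonChannel request as a
// client might build it. The channel is identified by its funding outpoint;
// the txid string below is a placeholder, not a real transaction, and the
// FundingTxidStr variant is assumed from the lnrpc proto conventions:
//
//	req := &lnrpc.AbandonChannelRequest{
//		ChannelPoint: &lnrpc.ChannelPoint{
//			FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
//				FundingTxidStr: "<funding-txid-hex>",
//			},
//			OutputIndex: 0,
//		},
//	}
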
// fetchActiveChannel attempts to locate a channel identified by its channel
// point from the database's set of all currently opened channels and return
// it as a fully populated state machine.
func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (
	*lnwallet.LightningChannel, error) {

	dbChan, err := r.server.chanDB.FetchChannel(chanPoint)
	if err != nil {
		return nil, err
	}

	// If the channel is successfully fetched from the database,
	// we create a fully populated channel state machine which
	// uses the db channel as backing storage.
	return lnwallet.NewLightningChannel(
		r.server.cc.wallet.Cfg.Signer, dbChan, nil,
	)
}

// GetInfo returns general information concerning the lightning node including
// its identity pubkey, alias, the chains it is connected to, and information
// concerning the number of open+pending channels.
func (r *rpcServer) GetInfo(ctx context.Context,
	in *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {

	serverPeers := r.server.Peers()

	openChannels, err := r.server.chanDB.FetchAllOpenChannels()
	if err != nil {
		return nil, err
	}

	var activeChannels uint32
	for _, channel := range openChannels {
		chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint)
		if r.server.htlcSwitch.HasActiveLink(chanID) {
			activeChannels++
		}
	}

	inactiveChannels := uint32(len(openChannels)) - activeChannels

	pendingChannels, err := r.server.chanDB.FetchPendingChannels()
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve pending "+
			"channels: %v", err)
	}
	nPendingChannels := uint32(len(pendingChannels))

	idPub := r.server.identityPriv.PubKey().SerializeCompressed()
	encodedIDPub := hex.EncodeToString(idPub)

	bestHash, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, fmt.Errorf("unable to get best block info: %v", err)
	}

	isSynced, bestHeaderTimestamp, err := r.server.cc.wallet.IsSynced()
	if err != nil {
		return nil, fmt.Errorf("unable to sync PoV of the wallet "+
			"with current best block in the main chain: %v", err)
	}

	network := normalizeNetwork(activeNetParams.Name)
	activeChains := make([]*lnrpc.Chain, registeredChains.NumActiveChains())
	for i, chain := range registeredChains.ActiveChains() {
		activeChains[i] = &lnrpc.Chain{
			Chain:   chain.String(),
			Network: network,
		}
	}

	// Check if external IP addresses were provided to lnd and use them
	// to set the URIs.
	nodeAnn, err := r.server.genNodeAnnouncement(false)
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve current fully signed "+
			"node announcement: %v", err)
	}
	addrs := nodeAnn.Addresses
	uris := make([]string, len(addrs))
	for i, addr := range addrs {
		uris[i] = fmt.Sprintf("%s@%s", encodedIDPub, addr.String())
	}

	// TODO(roasbeef): add synced height n stuff
	return &lnrpc.GetInfoResponse{
		IdentityPubkey:      encodedIDPub,
		NumPendingChannels:  nPendingChannels,
		NumActiveChannels:   activeChannels,
		NumInactiveChannels: inactiveChannels,
		NumPeers:            uint32(len(serverPeers)),
		BlockHeight:         uint32(bestHeight),
		BlockHash:           bestHash.String(),
		SyncedToChain:       isSynced,
		Testnet:             isTestnet(&activeNetParams),
		Chains:              activeChains,
		Uris:                uris,
		Alias:               nodeAnn.Alias.String(),
		Color:               routing.EncodeHexColor(nodeAnn.RGBColor),
		BestHeaderTimestamp: int64(bestHeaderTimestamp),
		Version:             build.Version(),
	}, nil
}

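// For illustration: each entry of the Uris field above has the form
// "<hex-encoded-identity-pubkey>@<host>:<port>", for example (placeholder
// values only):
//
//	02a1b2...c3d4@203.0.113.7:9735
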
// ListPeers returns a verbose listing of all currently active peers.
func (r *rpcServer) ListPeers(ctx context.Context,
	in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) {

	rpcsLog.Tracef("[listpeers] request")

	serverPeers := r.server.Peers()
	resp := &lnrpc.ListPeersResponse{
		Peers: make([]*lnrpc.Peer, 0, len(serverPeers)),
	}

	for _, serverPeer := range serverPeers {
		var (
			satSent int64
			satRecv int64
		)

		// In order to display the total number of outbound (sent) and
		// inbound (recv'd) satoshis that have been transported through
		// this peer, we'll sum up the sent/recv'd values for each of
		// the active channels we have with the peer.
		chans := serverPeer.ChannelSnapshots()
		for _, c := range chans {
			satSent += int64(c.TotalMSatSent.ToSatoshis())
			satRecv += int64(c.TotalMSatReceived.ToSatoshis())
		}

		nodePub := serverPeer.PubKey()

		// Retrieve the peer's sync type. If we don't currently have a
		// syncer for the peer, then we'll report its sync type as
		// unknown. This can happen if the RPC is called while a peer
		// is initializing.
		syncer, ok := r.server.authGossiper.SyncManager().GossipSyncer(
			nodePub,
		)

		var lnrpcSyncType lnrpc.Peer_SyncType
		if !ok {
			rpcsLog.Warnf("Gossip syncer for peer=%x not found",
				nodePub)
			lnrpcSyncType = lnrpc.Peer_UNKNOWN_SYNC
		} else {
			syncType := syncer.SyncType()
			switch syncType {
			case discovery.ActiveSync:
				lnrpcSyncType = lnrpc.Peer_ACTIVE_SYNC
			case discovery.PassiveSync:
				lnrpcSyncType = lnrpc.Peer_PASSIVE_SYNC
			default:
				return nil, fmt.Errorf("unhandled sync type %v",
					syncType)
			}
		}

		peer := &lnrpc.Peer{
			PubKey:    hex.EncodeToString(nodePub[:]),
			Address:   serverPeer.conn.RemoteAddr().String(),
			Inbound:   serverPeer.inbound,
			BytesRecv: atomic.LoadUint64(&serverPeer.bytesReceived),
			BytesSent: atomic.LoadUint64(&serverPeer.bytesSent),
			SatSent:   satSent,
			SatRecv:   satRecv,
			PingTime:  serverPeer.PingTime(),
			SyncType:  lnrpcSyncType,
		}

		resp.Peers = append(resp.Peers, peer)
	}

	rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)

	return resp, nil
}

// WalletBalance returns the total unspent outputs (confirmed and
// unconfirmed), all confirmed unspent outputs, and all unconfirmed unspent
// outputs under control of the wallet. This method can be modified by having
// the request specify that only witness outputs should be factored into the
// final output sum.
// TODO(roasbeef): add async hooks into wallet balance changes
func (r *rpcServer) WalletBalance(ctx context.Context,
	in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {

	// Get total balance, from txs that have >= 0 confirmations.
	totalBal, err := r.server.cc.wallet.ConfirmedBalance(0)
	if err != nil {
		return nil, err
	}

	// Get confirmed balance, from txs that have >= 1 confirmations.
	// TODO(halseth): get both unconfirmed and confirmed balance in one
	// call, as this is racy.
	confirmedBal, err := r.server.cc.wallet.ConfirmedBalance(1)
	if err != nil {
		return nil, err
	}

	// Get unconfirmed balance, from txs with 0 confirmations.
	unconfirmedBal := totalBal - confirmedBal

	rpcsLog.Debugf("[walletbalance] Total balance=%v (confirmed=%v, "+
		"unconfirmed=%v)", totalBal, confirmedBal, unconfirmedBal)

	return &lnrpc.WalletBalanceResponse{
		TotalBalance:       int64(totalBal),
		ConfirmedBalance:   int64(confirmedBal),
		UnconfirmedBalance: int64(unconfirmedBal),
	}, nil
}

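// Worked example with illustrative values: if ConfirmedBalance(0) reports
// 150,000 sat and ConfirmedBalance(1) reports 100,000 sat, then
//
//	unconfirmedBal = 150_000 - 100_000 = 50_000 sat
//
// i.e. 50,000 sat are held in outputs of still-unconfirmed transactions.
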
// ChannelBalance returns the total available channel flow across all open
// channels in satoshis.
func (r *rpcServer) ChannelBalance(ctx context.Context,
	in *lnrpc.ChannelBalanceRequest) (*lnrpc.ChannelBalanceResponse, error) {

	openChannels, err := r.server.chanDB.FetchAllOpenChannels()
	if err != nil {
		return nil, err
	}

	var balance btcutil.Amount
	for _, channel := range openChannels {
		balance += channel.LocalCommitment.LocalBalance.ToSatoshis()
	}

	pendingChannels, err := r.server.chanDB.FetchPendingChannels()
	if err != nil {
		return nil, err
	}

	var pendingOpenBalance btcutil.Amount
	for _, channel := range pendingChannels {
		pendingOpenBalance += channel.LocalCommitment.LocalBalance.ToSatoshis()
	}

	rpcsLog.Debugf("[channelbalance] balance=%v pending-open=%v",
		balance, pendingOpenBalance)

	return &lnrpc.ChannelBalanceResponse{
		Balance:            int64(balance),
		PendingOpenBalance: int64(pendingOpenBalance),
	}, nil
}

// PendingChannels returns a list of all the channels that are currently
// considered "pending". A channel is pending if it has finished the funding
// workflow and is waiting for confirmations for the funding txn, or is in the
// process of closure, either initiated cooperatively or non-cooperatively.
func (r *rpcServer) PendingChannels(ctx context.Context,
	in *lnrpc.PendingChannelsRequest) (*lnrpc.PendingChannelsResponse, error) {

	rpcsLog.Debugf("[pendingchannels]")

	resp := &lnrpc.PendingChannelsResponse{}

	// First, we'll populate the response with all the channels that are
	// soon to be opened. We can easily fetch this data from the database
	// and map the db struct to the proto response.
	pendingOpenChannels, err := r.server.chanDB.FetchPendingChannels()
	if err != nil {
		rpcsLog.Errorf("unable to fetch pending channels: %v", err)
		return nil, err
	}
	resp.PendingOpenChannels = make([]*lnrpc.PendingChannelsResponse_PendingOpenChannel,
		len(pendingOpenChannels))
	for i, pendingChan := range pendingOpenChannels {
		pub := pendingChan.IdentityPub.SerializeCompressed()

		// As this is required for display purposes, we'll calculate
		// the weight of the commitment transaction. We also add on the
		// estimated weight of the witness to calculate the weight of
		// the transaction if it were to be immediately unilaterally
		// broadcast.
		// TODO(roasbeef): query for funding tx from wallet, display
		// that also?
		localCommitment := pendingChan.LocalCommitment
		utx := btcutil.NewTx(localCommitment.CommitTx)
		commitBaseWeight := blockchain.GetTransactionWeight(utx)
		commitWeight := commitBaseWeight + input.WitnessCommitmentTxWeight

		resp.PendingOpenChannels[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{
			Channel: &lnrpc.PendingChannelsResponse_PendingChannel{
				RemoteNodePub:        hex.EncodeToString(pub),
				ChannelPoint:         pendingChan.FundingOutpoint.String(),
				Capacity:             int64(pendingChan.Capacity),
				LocalBalance:         int64(localCommitment.LocalBalance.ToSatoshis()),
				RemoteBalance:        int64(localCommitment.RemoteBalance.ToSatoshis()),
				LocalChanReserveSat:  int64(pendingChan.LocalChanCfg.ChanReserve),
				RemoteChanReserveSat: int64(pendingChan.RemoteChanCfg.ChanReserve),
			},
			CommitWeight: commitWeight,
			CommitFee:    int64(localCommitment.CommitFee),
			FeePerKw:     int64(localCommitment.FeePerKw),
			// TODO(roasbeef): need to track confirmation height
		}
	}

	_, currentHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, err
	}

	// Next, we'll examine the channels that are soon to be closed so we
	// can populate these fields within the response.
	pendingCloseChannels, err := r.server.chanDB.FetchClosedChannels(true)
	if err != nil {
		rpcsLog.Errorf("unable to fetch closed channels: %v", err)
		return nil, err
	}
	for _, pendingClose := range pendingCloseChannels {
		// First construct the channel struct itself; this will be
		// needed regardless of how this channel was closed.
		pub := pendingClose.RemotePub.SerializeCompressed()
		chanPoint := pendingClose.ChanPoint
		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
			RemoteNodePub: hex.EncodeToString(pub),
			ChannelPoint:  chanPoint.String(),
			Capacity:      int64(pendingClose.Capacity),
			LocalBalance:  int64(pendingClose.SettledBalance),
		}

		closeTXID := pendingClose.ClosingTXID.String()

		switch pendingClose.CloseType {

		// If the channel was closed cooperatively, then we'll only
		// need to tack on the closing txid.
		// TODO(halseth): remove. After recent changes, a coop closed
		// channel should never be in the "pending close" state.
		// Keeping for now to let someone that upgraded in the middle
		// of a close let their closing tx confirm.
		case channeldb.CooperativeClose:
			resp.PendingClosingChannels = append(
				resp.PendingClosingChannels,
				&lnrpc.PendingChannelsResponse_ClosedChannel{
					Channel:     channel,
					ClosingTxid: closeTXID,
				},
			)

			resp.TotalLimboBalance += channel.LocalBalance

		// If the channel was force closed, then we'll need to query
		// the utxoNursery for additional information.
		// TODO(halseth): distinguish remote and local case?
		case channeldb.LocalForceClose, channeldb.RemoteForceClose:
			forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
				Channel:     channel,
				ClosingTxid: closeTXID,
			}

			// Fetch reports from both nursery and resolvers. At
			// the moment this is not an atomic snapshot. This is
			// planned to be resolved when the nursery is removed
			// and the channel arbitrator becomes the single source
			// for these kinds of reports.
			err := r.nurseryPopulateForceCloseResp(
				&chanPoint, currentHeight, forceClose,
			)
			if err != nil {
				return nil, err
			}

			err = r.arbitratorPopulateForceCloseResp(
				&chanPoint, currentHeight, forceClose,
			)
			if err != nil {
				return nil, err
			}

			resp.TotalLimboBalance += int64(forceClose.LimboBalance)

			resp.PendingForceClosingChannels = append(
				resp.PendingForceClosingChannels,
				forceClose,
			)
		}
	}

	// We'll also fetch all channels that are open, but have had their
	// commitment broadcast, meaning they are waiting for the closing
	// transaction to confirm.
	waitingCloseChans, err := r.server.chanDB.FetchWaitingCloseChannels()
	if err != nil {
		rpcsLog.Errorf("unable to fetch channels waiting close: %v",
			err)
		return nil, err
	}

	for _, waitingClose := range waitingCloseChans {
		pub := waitingClose.IdentityPub.SerializeCompressed()
		chanPoint := waitingClose.FundingOutpoint
		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
			RemoteNodePub: hex.EncodeToString(pub),
			ChannelPoint:  chanPoint.String(),
			Capacity:      int64(waitingClose.Capacity),
			LocalBalance:  int64(waitingClose.LocalCommitment.LocalBalance.ToSatoshis()),
		}

		// A close tx has been broadcast; all our balance will be in
		// limbo until it confirms.
		resp.WaitingCloseChannels = append(
			resp.WaitingCloseChannels,
			&lnrpc.PendingChannelsResponse_WaitingCloseChannel{
				Channel:      channel,
				LimboBalance: channel.LocalBalance,
			},
		)

		resp.TotalLimboBalance += channel.LocalBalance
	}

	return resp, nil
}

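// Worked example for the commit weight computed above (illustrative
// numbers): if blockchain.GetTransactionWeight returns a base weight of
// 600 weight units for the unsigned commitment transaction, then
//
//	commitWeight = 600 + input.WitnessCommitmentTxWeight
//
// which is the weight the transaction would have if it were signed and
// unilaterally broadcast right now.
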
// arbitratorPopulateForceCloseResp populates the pending channels response
// message with channel resolution information from the contract resolvers.
func (r *rpcServer) arbitratorPopulateForceCloseResp(chanPoint *wire.OutPoint,
	currentHeight int32,
	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {

	// Query for the contract resolvers' state.
	arbitrator, err := r.server.chainArb.GetChannelArbitrator(*chanPoint)
	if err != nil {
		return err
	}
	reports := arbitrator.Report()

	for _, report := range reports {
		htlc := &lnrpc.PendingHTLC{
			Incoming:       report.Incoming,
			Amount:         int64(report.Amount),
			Outpoint:       report.Outpoint.String(),
			MaturityHeight: report.MaturityHeight,
			Stage:          report.Stage,
		}

		if htlc.MaturityHeight != 0 {
			htlc.BlocksTilMaturity =
				int32(htlc.MaturityHeight) - currentHeight
		}

		forceClose.LimboBalance += int64(report.LimboBalance)
		forceClose.RecoveredBalance += int64(report.RecoveredBalance)

		forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc)
	}

	return nil
}

// nurseryPopulateForceCloseResp populates the pending channels response
// message with contract resolution information from the utxoNursery.
func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint,
	currentHeight int32,
	forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel) error {

	// Query for the maturity state for this force closed channel. If we
	// didn't have any time-locked outputs, then the nursery may not know of
	// the contract.
	nurseryInfo, err := r.server.utxoNursery.NurseryReport(chanPoint)
	if err == ErrContractNotFound {
		return nil
	}
	if err != nil {
		return fmt.Errorf("unable to obtain "+
			"nursery report for ChannelPoint(%v): %v",
			chanPoint, err)
	}

	// If the nursery knows of this channel, then we can populate
	// information detailing exactly how much of the funds are time locked
	// and also the height at which we can ultimately sweep the funds into
	// the wallet.
	forceClose.LimboBalance = int64(nurseryInfo.limboBalance)
	forceClose.RecoveredBalance = int64(nurseryInfo.recoveredBalance)
	forceClose.MaturityHeight = nurseryInfo.maturityHeight

	// If the transaction has been confirmed, then we can compute how many
	// blocks it has left.
	if forceClose.MaturityHeight != 0 {
		forceClose.BlocksTilMaturity =
			int32(forceClose.MaturityHeight) -
				currentHeight
	}

	for _, htlcReport := range nurseryInfo.htlcs {
		// TODO(conner) set incoming flag appropriately after handling
		// incoming incubation
		htlc := &lnrpc.PendingHTLC{
			Incoming:       false,
			Amount:         int64(htlcReport.amount),
			Outpoint:       htlcReport.outpoint.String(),
			MaturityHeight: htlcReport.maturityHeight,
			Stage:          htlcReport.stage,
		}

		if htlc.MaturityHeight != 0 {
			htlc.BlocksTilMaturity =
				int32(htlc.MaturityHeight) -
					currentHeight
		}

		forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
			htlc)
	}

	return nil
}

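// Worked example with illustrative heights: with MaturityHeight = 600_144
// and currentHeight = 600_000, BlocksTilMaturity = 144, i.e. roughly one
// day's worth of blocks before the time-locked output can be swept back
// into the wallet.
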
// ClosedChannels returns a list of all the channels that have been closed.
// This does not include channels that are still in the process of closing.
func (r *rpcServer) ClosedChannels(ctx context.Context,
	in *lnrpc.ClosedChannelsRequest) (*lnrpc.ClosedChannelsResponse,
	error) {

	// Show all channels when no filter flags are set.
	filterResults := in.Cooperative || in.LocalForce ||
		in.RemoteForce || in.Breach || in.FundingCanceled ||
		in.Abandoned

	resp := &lnrpc.ClosedChannelsResponse{}

	dbChannels, err := r.server.chanDB.FetchClosedChannels(false)
	if err != nil {
		return nil, err
	}

	// In order to make the response easier to parse for clients, we'll
	// sort the set of closed channels by their closing height before
	// serializing the proto response.
	sort.Slice(dbChannels, func(i, j int) bool {
		return dbChannels[i].CloseHeight < dbChannels[j].CloseHeight
	})

	for _, dbChannel := range dbChannels {
		if dbChannel.IsPending {
			continue
		}

		switch dbChannel.CloseType {
		case channeldb.CooperativeClose:
			if filterResults && !in.Cooperative {
				continue
			}
		case channeldb.LocalForceClose:
			if filterResults && !in.LocalForce {
				continue
			}
		case channeldb.RemoteForceClose:
			if filterResults && !in.RemoteForce {
				continue
			}
		case channeldb.BreachClose:
			if filterResults && !in.Breach {
				continue
			}
		case channeldb.FundingCanceled:
			if filterResults && !in.FundingCanceled {
				continue
			}
		case channeldb.Abandoned:
			if filterResults && !in.Abandoned {
				continue
			}
		}

		channel := createRPCClosedChannel(dbChannel)
		resp.Channels = append(resp.Channels, channel)
	}

	return resp, nil
}

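// For illustration: with no filter flags set, filterResults above is false
// and every fully closed channel is returned. Setting exactly one flag
// narrows the result to that closure type, e.g. cooperative closes only:
//
//	req := &lnrpc.ClosedChannelsRequest{Cooperative: true}
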
// ListChannels returns a description of all the open channels that this node
// is a participant in.
func (r *rpcServer) ListChannels(ctx context.Context,
	in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) {

	if in.ActiveOnly && in.InactiveOnly {
		return nil, fmt.Errorf("either `active_only` or " +
			"`inactive_only` can be set, but not both")
	}

	if in.PublicOnly && in.PrivateOnly {
		return nil, fmt.Errorf("either `public_only` or " +
			"`private_only` can be set, but not both")
	}

	resp := &lnrpc.ListChannelsResponse{}

	graph := r.server.chanDB.ChannelGraph()

	dbChannels, err := r.server.chanDB.FetchAllOpenChannels()
	if err != nil {
		return nil, err
	}

	rpcsLog.Debugf("[listchannels] fetched %v channels from DB",
		len(dbChannels))

	for _, dbChannel := range dbChannels {
		nodePub := dbChannel.IdentityPub
		chanPoint := dbChannel.FundingOutpoint

		var peerOnline bool
		if _, err := r.server.FindPeer(nodePub); err == nil {
			peerOnline = true
		}

		channelID := lnwire.NewChanIDFromOutPoint(&chanPoint)
		var linkActive bool
		if link, err := r.server.htlcSwitch.GetLink(channelID); err == nil {
			// A channel is only considered active if it is known
			// by the switch *and* able to forward
			// incoming/outgoing payments.
			linkActive = link.EligibleToForward()
		}

		// Next, we'll determine whether we should add this channel to
		// our list depending on the type of channels requested to us.
		isActive := peerOnline && linkActive
		channel := createRPCOpenChannel(r, graph, dbChannel, isActive)

		// We'll only skip returning this channel if a specific kind
		// of channel was requested and this channel doesn't satisfy
		// it.
		switch {
		case in.ActiveOnly && !isActive:
			continue
		case in.InactiveOnly && isActive:
			continue
		case in.PublicOnly && channel.Private:
			continue
		case in.PrivateOnly && !channel.Private:
			continue
		}

		resp.Channels = append(resp.Channels, channel)
	}

	return resp, nil
}

// createRPCOpenChannel creates an *lnrpc.Channel from a
// *channeldb.OpenChannel.
func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph,
	dbChannel *channeldb.OpenChannel, isActive bool) *lnrpc.Channel {

	nodePub := dbChannel.IdentityPub
	nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
	chanPoint := dbChannel.FundingOutpoint

	// Next, we'll determine whether the channel is public or not.
	isPublic := dbChannel.ChannelFlags&lnwire.FFAnnounceChannel != 0

	// As this is required for display purposes, we'll calculate
	// the weight of the commitment transaction. We also add on the
	// estimated weight of the witness to calculate the weight of
	// the transaction if it were to be immediately unilaterally
	// broadcast.
	localCommit := dbChannel.LocalCommitment
	utx := btcutil.NewTx(localCommit.CommitTx)
	commitBaseWeight := blockchain.GetTransactionWeight(utx)
	commitWeight := commitBaseWeight + input.WitnessCommitmentTxWeight

	localBalance := localCommit.LocalBalance
	remoteBalance := localCommit.RemoteBalance

	// As an artifact of our usage of mSAT internally, either party
	// may end up in a state where they're holding a fractional
	// amount of satoshis which can't be expressed within the
	// actual commitment output. Since we round down when going
	// from mSAT -> SAT, we may at any point be adding an
	// additional SAT to miners fees. As a result, we display a
	// commitment fee that accounts for this externally.
	var sumOutputs btcutil.Amount
	for _, txOut := range localCommit.CommitTx.TxOut {
		sumOutputs += btcutil.Amount(txOut.Value)
	}
	externalCommitFee := dbChannel.Capacity - sumOutputs

	channel := &lnrpc.Channel{
		Active:                isActive,
		Private:               !isPublic,
		RemotePubkey:          nodeID,
		ChannelPoint:          chanPoint.String(),
		ChanId:                dbChannel.ShortChannelID.ToUint64(),
		Capacity:              int64(dbChannel.Capacity),
		LocalBalance:          int64(localBalance.ToSatoshis()),
		RemoteBalance:         int64(remoteBalance.ToSatoshis()),
		CommitFee:             int64(externalCommitFee),
		CommitWeight:          commitWeight,
		FeePerKw:              int64(localCommit.FeePerKw),
		TotalSatoshisSent:     int64(dbChannel.TotalMSatSent.ToSatoshis()),
		TotalSatoshisReceived: int64(dbChannel.TotalMSatReceived.ToSatoshis()),
		NumUpdates:            localCommit.CommitHeight,
		PendingHtlcs:          make([]*lnrpc.HTLC, len(localCommit.Htlcs)),
		CsvDelay:              uint32(dbChannel.LocalChanCfg.CsvDelay),
		Initiator:             dbChannel.IsInitiator,
		ChanStatusFlags:       dbChannel.ChanStatus().String(),
		LocalChanReserveSat:   int64(dbChannel.LocalChanCfg.ChanReserve),
		RemoteChanReserveSat:  int64(dbChannel.RemoteChanCfg.ChanReserve),
	}

	for i, htlc := range localCommit.Htlcs {
		var rHash [32]byte
		copy(rHash[:], htlc.RHash[:])
		channel.PendingHtlcs[i] = &lnrpc.HTLC{
			Incoming:         htlc.Incoming,
			Amount:           int64(htlc.Amt.ToSatoshis()),
			HashLock:         rHash[:],
			ExpirationHeight: htlc.RefundTimeout,
		}

		// Add the pending HTLC amount to the UnsettledBalance field.
		channel.UnsettledBalance += channel.PendingHtlcs[i].Amount
	}

	return channel
}

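// Worked example of the mSAT rounding described above (illustrative
// values): with a local balance of 1_000_000_500 mSAT and a remote balance
// of 999_999_500 mSAT, the commitment outputs round down to 1_000_000 and
// 999_999 sat respectively. The combined 1_000 mSAT remainder (one whole
// satoshi) can't be expressed in any output, so
//
//	externalCommitFee = capacity - sumOutputs
//
// reports one satoshi more than the nominal commitment fee alone.
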
// createRPCClosedChannel creates an *lnrpc.ChannelCloseSummary from a
// *channeldb.ChannelCloseSummary.
func createRPCClosedChannel(
	dbChannel *channeldb.ChannelCloseSummary) *lnrpc.ChannelCloseSummary {

	nodePub := dbChannel.RemotePub
	nodeID := hex.EncodeToString(nodePub.SerializeCompressed())

	var closeType lnrpc.ChannelCloseSummary_ClosureType
	switch dbChannel.CloseType {
	case channeldb.CooperativeClose:
		closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE
	case channeldb.LocalForceClose:
		closeType = lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE
	case channeldb.RemoteForceClose:
		closeType = lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE
	case channeldb.BreachClose:
		closeType = lnrpc.ChannelCloseSummary_BREACH_CLOSE
	case channeldb.FundingCanceled:
		closeType = lnrpc.ChannelCloseSummary_FUNDING_CANCELED
	case channeldb.Abandoned:
		closeType = lnrpc.ChannelCloseSummary_ABANDONED
	}

	return &lnrpc.ChannelCloseSummary{
		Capacity:          int64(dbChannel.Capacity),
		RemotePubkey:      nodeID,
		CloseHeight:       dbChannel.CloseHeight,
		CloseType:         closeType,
		ChannelPoint:      dbChannel.ChanPoint.String(),
		ChanId:            dbChannel.ShortChanID.ToUint64(),
		SettledBalance:    int64(dbChannel.SettledBalance),
		TimeLockedBalance: int64(dbChannel.TimeLockedBalance),
		ChainHash:         dbChannel.ChainHash.String(),
		ClosingTxHash:     dbChannel.ClosingTXID.String(),
	}
}

// SubscribeChannelEvents returns a uni-directional stream (server -> client)
// for notifying the client of newly active, inactive or closed channels.
func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,
	updateStream lnrpc.Lightning_SubscribeChannelEventsServer) error {

	channelEventSub, err := r.server.channelNotifier.SubscribeChannelEvents()
	if err != nil {
		return err
	}

	// Ensure that the resources for the client are cleaned up once either
	// the server or the client exits.
	defer channelEventSub.Cancel()

	graph := r.server.chanDB.ChannelGraph()

	for {
		select {
		// A new update has been sent by the channel notifier; we'll
		// marshal it into the form expected by the gRPC client, then
		// send it off to the client(s).
		case e := <-channelEventSub.Updates():
			var update *lnrpc.ChannelEventUpdate
			switch event := e.(type) {
			case channelnotifier.OpenChannelEvent:
				channel := createRPCOpenChannel(r, graph,
					event.Channel, true)
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_OPEN_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_OpenChannel{
						OpenChannel: channel,
					},
				}
			case channelnotifier.ClosedChannelEvent:
				closedChannel := createRPCClosedChannel(event.CloseSummary)
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_CLOSED_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_ClosedChannel{
						ClosedChannel: closedChannel,
					},
				}
			case channelnotifier.ActiveChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_ActiveChannel{
						ActiveChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}
			case channelnotifier.InactiveChannelEvent:
				update = &lnrpc.ChannelEventUpdate{
					Type: lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL,
					Channel: &lnrpc.ChannelEventUpdate_InactiveChannel{
						InactiveChannel: &lnrpc.ChannelPoint{
							FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
								FundingTxidBytes: event.ChannelPoint.Hash[:],
							},
							OutputIndex: event.ChannelPoint.Index,
						},
					},
				}
			default:
				return fmt.Errorf("unexpected channel event update: %v", event)
			}

			if err := updateStream.Send(update); err != nil {
				return err
			}
		case <-r.quit:
			return nil
		}
	}
}

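// For illustration only, a client-side consumption loop for this stream,
// sketched against the generated lnrpc client interface (assumed here, not
// part of this file):
//
//	stream, err := client.SubscribeChannelEvents(
//		ctx, &lnrpc.ChannelEventSubscription{},
//	)
//	if err != nil {
//		return err
//	}
//	for {
//		update, err := stream.Recv()
//		if err != nil {
//			return err
//		}
//		log.Printf("channel event: %v", update.Type)
//	}
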
// paymentStream enables different types of payment streams, such as
// lnrpc.Lightning_SendPaymentServer and lnrpc.Lightning_SendToRouteServer, to
// execute sendPayment. We use this struct as a sort of bridge to enable code
// re-use between SendPayment and SendToRoute.
type paymentStream struct {
	recv func() (*rpcPaymentRequest, error)
	send func(*lnrpc.SendResponse) error
}

// rpcPaymentRequest wraps lnrpc.SendRequest so that routes from
// lnrpc.SendToRouteRequest can be passed to sendPayment.
type rpcPaymentRequest struct {
	*lnrpc.SendRequest
	route *route.Route
}

// calculateFeeLimit returns the fee limit in millisatoshis. If a percentage
// based fee limit has been requested, we'll factor in the ratio provided with
// the amount of the payment.
func calculateFeeLimit(feeLimit *lnrpc.FeeLimit,
	amount lnwire.MilliSatoshi) lnwire.MilliSatoshi {

	switch feeLimit.GetLimit().(type) {
	case *lnrpc.FeeLimit_Fixed:
		return lnwire.NewMSatFromSatoshis(
			btcutil.Amount(feeLimit.GetFixed()),
		)
	case *lnrpc.FeeLimit_Percent:
		return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100
	default:
		// If a fee limit was not specified, we'll use the payment's
		// amount as an upper bound in order to avoid payment attempts
		// from incurring fees higher than the payment amount itself.
		return amount
	}
}

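// Worked example with illustrative values: for a payment of 200_000 mSAT
// with FeeLimit_Percent set to 5, the cap is
//
//	200_000 * 5 / 100 = 10_000 mSAT
//
// while a FeeLimit_Fixed of 10 satoshis converts to the same 10_000 mSAT.
// With no limit set, the cap defaults to the full 200_000 mSAT amount.
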
// SendPayment dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network. A single RPC invocation creates a persistent
// bi-directional stream allowing clients to rapidly send payments through the
// Lightning Network with a single persistent connection.
func (r *rpcServer) SendPayment(stream lnrpc.Lightning_SendPaymentServer) error {
	var lock sync.Mutex

	return r.sendPayment(&paymentStream{
		recv: func() (*rpcPaymentRequest, error) {
			req, err := stream.Recv()
			if err != nil {
				return nil, err
			}

			return &rpcPaymentRequest{
				SendRequest: req,
			}, nil
		},
		send: func(r *lnrpc.SendResponse) error {
			// Calling stream.Send concurrently is not safe.
			lock.Lock()
			defer lock.Unlock()
			return stream.Send(r)
		},
	})
}

// SendToRoute dispatches a bi-directional streaming RPC for sending payments
// through the Lightning Network via predefined routes passed in. A single RPC
// invocation creates a persistent bi-directional stream allowing clients to
// rapidly send payments through the Lightning Network with a single persistent
// connection.
func (r *rpcServer) SendToRoute(stream lnrpc.Lightning_SendToRouteServer) error {
	var lock sync.Mutex

	return r.sendPayment(&paymentStream{
		recv: func() (*rpcPaymentRequest, error) {
			req, err := stream.Recv()
			if err != nil {
				return nil, err
			}

			return r.unmarshallSendToRouteRequest(req)
		},
		send: func(r *lnrpc.SendResponse) error {
			// Calling stream.Send concurrently is not safe.
			lock.Lock()
			defer lock.Unlock()
			return stream.Send(r)
		},
	})
}

// unmarshallSendToRouteRequest unmarshalls an RPC SendToRoute request.
func (r *rpcServer) unmarshallSendToRouteRequest(
	req *lnrpc.SendToRouteRequest) (*rpcPaymentRequest, error) {

	if req.Route == nil {
		return nil, fmt.Errorf("unable to send, no route provided")
	}

	route, err := r.routerBackend.UnmarshallRoute(req.Route)
	if err != nil {
		return nil, err
	}

	return &rpcPaymentRequest{
		SendRequest: &lnrpc.SendRequest{
			PaymentHash:       req.PaymentHash,
			PaymentHashString: req.PaymentHashString,
		},
		route: route,
	}, nil
}

// rpcPaymentIntent is a small wrapper struct around the set of values we can
// receive from a client over RPC if they wish to send a payment. We'll either
// extract these fields from a payment request (which may include routing
// hints), or we'll get a fully populated route from the user that we'll pass
// directly to the channel router for dispatching.
type rpcPaymentIntent struct {
	msat              lnwire.MilliSatoshi
	feeLimit          lnwire.MilliSatoshi
	cltvLimit         *uint32
	dest              route.Vertex
	rHash             [32]byte
	cltvDelta         uint16
	routeHints        [][]zpay32.HopHint
	outgoingChannelID *uint64
	payReq            []byte

	route *route.Route
}

// extractPaymentIntent attempts to parse the complete details required to
// dispatch a payment from the information presented by an RPC client. There
// are three ways a client can specify their payment details: a payment
// request, via manual details, or via a complete route.
func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) {
	payIntent := rpcPaymentIntent{}

	// If a route was specified, then we can use that directly.
	if rpcPayReq.route != nil {
		// If the user is using the REST interface, then they'll be
		// passing the payment hash as a hex encoded string.
		if rpcPayReq.PaymentHashString != "" {
			paymentHash, err := hex.DecodeString(
				rpcPayReq.PaymentHashString,
			)
			if err != nil {
				return payIntent, err
			}

			copy(payIntent.rHash[:], paymentHash)
		} else {
			copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
		}

		payIntent.route = rpcPayReq.route
		return payIntent, nil
	}

	// If there are no routes specified, pass along an outgoing channel
	// restriction if specified.
	if rpcPayReq.OutgoingChanId != 0 {
		payIntent.outgoingChannelID = &rpcPayReq.OutgoingChanId
	}

	// Take the CLTV limit from the request if set.
	if rpcPayReq.CltvLimit != 0 {
		payIntent.cltvLimit = &rpcPayReq.CltvLimit
	}

	// If the payment request field isn't blank, then the details of the
	// invoice are encoded entirely within the encoded payReq. So we'll
	// attempt to decode it, populating the payment accordingly.
	if rpcPayReq.PaymentRequest != "" {
		payReq, err := zpay32.Decode(
			rpcPayReq.PaymentRequest, activeNetParams.Params,
		)
		if err != nil {
			return payIntent, err
		}

		// Next, we'll ensure that this payreq hasn't already expired.
		err = routerrpc.ValidatePayReqExpiry(payReq)
		if err != nil {
			return payIntent, err
		}

		// If the amount was not included in the invoice, then we let
		// the sender specify the amount of satoshis they wish to send.
		// Otherwise, we override the amount to pay with the amount
		// provided by the payment request.
		if payReq.MilliSat == nil {
			if rpcPayReq.Amt == 0 {
				return payIntent, errors.New("amount must be " +
					"specified when paying a zero amount " +
					"invoice")
			}

			payIntent.msat = lnwire.NewMSatFromSatoshis(
				btcutil.Amount(rpcPayReq.Amt),
			)
		} else {
			payIntent.msat = *payReq.MilliSat
		}

		// Calculate the fee limit that should be used for this payment.
		payIntent.feeLimit = calculateFeeLimit(
			rpcPayReq.FeeLimit, payIntent.msat,
		)

		copy(payIntent.rHash[:], payReq.PaymentHash[:])
		destKey := payReq.Destination.SerializeCompressed()
		copy(payIntent.dest[:], destKey)
		payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry())
		payIntent.routeHints = payReq.RouteHints
		payIntent.payReq = []byte(rpcPayReq.PaymentRequest)

		return payIntent, nil
	}

	// At this point, a destination MUST be specified, so we'll convert it
	// into the proper representation now. The destination will either be
	// encoded as raw bytes, or via a hex string.
	var pubBytes []byte
	if len(rpcPayReq.Dest) != 0 {
		pubBytes = rpcPayReq.Dest
	} else {
		var err error
		pubBytes, err = hex.DecodeString(rpcPayReq.DestString)
		if err != nil {
			return payIntent, err
		}
	}
	if len(pubBytes) != 33 {
		return payIntent, errors.New("invalid key length")
	}
	copy(payIntent.dest[:], pubBytes)

	// Otherwise, if the payment request field was not specified
	// (and a custom route wasn't specified), construct the payment
	// from the other fields.
	payIntent.msat = lnwire.NewMSatFromSatoshis(
		btcutil.Amount(rpcPayReq.Amt),
	)

	// Calculate the fee limit that should be used for this payment.
	payIntent.feeLimit = calculateFeeLimit(
		rpcPayReq.FeeLimit, payIntent.msat,
	)

	if rpcPayReq.FinalCltvDelta != 0 {
		payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta)
	} else {
		payIntent.cltvDelta = zpay32.DefaultFinalCLTVDelta
	}

	// If the user is manually specifying payment details, then the payment
	// hash may be encoded as a string.
	switch {
	case rpcPayReq.PaymentHashString != "":
		paymentHash, err := hex.DecodeString(
			rpcPayReq.PaymentHashString,
		)
		if err != nil {
			return payIntent, err
		}

		copy(payIntent.rHash[:], paymentHash)

	// If we're in debug HTLC mode, then all outgoing HTLCs will pay to the
	// same debug rHash. Otherwise, we pay to the rHash specified within
	// the RPC request.
	case cfg.DebugHTLC &&
		bytes.Equal(payIntent.rHash[:], lntypes.ZeroHash[:]):

		copy(payIntent.rHash[:], invoices.DebugHash[:])

	default:
		copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
	}

	// Currently, within the bootstrap phase of the network, we limit the
	// largest payment size allotted to (2^32) - 1 mSAT, or 4.29 million
	// satoshis.
	if payIntent.msat > MaxPaymentMSat {
		// In this case, we'll send an error to the caller, but
		// continue our loop for the next payment.
		return payIntent, fmt.Errorf("payment of %v is too large, "+
			"max payment allowed is %v", payIntent.msat,
			MaxPaymentMSat)
	}

	return payIntent, nil
}

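// Worked example for the payment size cap above: (2^32 - 1) mSAT is
// 4_294_967_295 mSAT, which rounds down to 4_294_967 satoshis (about
// 0.043 BTC), hence the "4.29 million satoshis" figure in the comment.
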
// paymentIntentResponse holds the result of dispatching a single payment
// intent.
type paymentIntentResponse struct {
	Route    *route.Route
	Preimage [32]byte
	Err      error
}

// dispatchPaymentIntent attempts to fully dispatch an RPC payment intent.
// We'll either pass the payment as a whole to the channel router, or give it a
// pre-built route. The first error this method returns denotes if we were
// unable to save the payment. The second error returned denotes if the payment
// didn't succeed.
func (r *rpcServer) dispatchPaymentIntent(
	payIntent *rpcPaymentIntent) (*paymentIntentResponse, error) {

	// Construct a payment request to send to the channel router. If the
	// payment is successful, the route chosen will be returned. Otherwise,
	// we'll get a non-nil error.
	var (
		preImage  [32]byte
		route     *route.Route
		routerErr error
	)

	// If a route was specified, then we'll pass the route directly to the
	// router, otherwise we'll create a payment session to execute it.
	if payIntent.route == nil {
		payment := &routing.LightningPayment{
			Target:            payIntent.dest,
			Amount:            payIntent.msat,
			FinalCLTVDelta:    payIntent.cltvDelta,
			FeeLimit:          payIntent.feeLimit,
			CltvLimit:         payIntent.cltvLimit,
			PaymentHash:       payIntent.rHash,
			RouteHints:        payIntent.routeHints,
			OutgoingChannelID: payIntent.outgoingChannelID,
			PaymentRequest:    payIntent.payReq,
			PayAttemptTimeout: routing.DefaultPayAttemptTimeout,
		}

		preImage, route, routerErr = r.server.chanRouter.SendPayment(
			payment,
		)
	} else {
		preImage, routerErr = r.server.chanRouter.SendToRoute(
			payIntent.rHash, payIntent.route,
		)

		route = payIntent.route
	}

	// If the route failed, then we'll return a nil save err, but a non-nil
	// routing err.
	if routerErr != nil {
		rpcsLog.Warnf("Unable to send payment: %v", routerErr)

		return &paymentIntentResponse{
			Err: routerErr,
		}, nil
	}

	return &paymentIntentResponse{
		Route:    route,
		Preimage: preImage,
	}, nil
}

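// A minimal caller sketch illustrating the two-error contract above
// (handleIntent is a hypothetical helper, not part of the real API): the
// outer error means the payment state couldn't be saved and is fatal, while
// resp.Err is a routing-level failure reported back to the client.
//
//	func handleIntent(r *rpcServer, intent *rpcPaymentIntent) error {
//		resp, saveErr := r.dispatchPaymentIntent(intent)
//		if saveErr != nil {
//			return saveErr // couldn't save payment state: fatal
//		}
//		if resp.Err != nil {
//			// Routing failure: report it, keep serving.
//			rpcsLog.Warnf("payment failed: %v", resp.Err)
//			return nil
//		}
//		rpcsLog.Infof("paid via %d hops", len(resp.Route.Hops))
//		return nil
//	}
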
|
|
|
// sendPayment takes a paymentStream (a source of pre-built routes or payment
|
|
|
|
// requests) and continually attempt to dispatch payment requests written to
|
|
|
|
// the write end of the stream. Responses will also be streamed back to the
|
|
|
|
// client via the write end of the stream. This method is by both SendToRoute
|
|
|
|
// and SendPayment as the logic is virtually identical.
|
2018-05-01 11:17:55 +03:00
|
|
|
func (r *rpcServer) sendPayment(stream *paymentStream) error {
|
2018-06-07 06:40:28 +03:00
|
|
|
payChan := make(chan *rpcPaymentIntent)
|
2016-07-22 02:22:30 +03:00
|
|
|
errChan := make(chan error, 1)
|
2016-10-27 01:05:10 +03:00
|
|
|
|
2017-08-03 06:59:43 +03:00
|
|
|
// We don't allow payments to be sent while the daemon itself is still
|
|
|
|
// syncing as we may be trying to sent a payment over a "stale"
|
|
|
|
// channel.
|
|
|
|
if !r.server.Started() {
|
|
|
|
return fmt.Errorf("chain backend is still syncing, server " +
|
|
|
|
"not active yet")
|
|
|
|
}
|
|
|
|
|
2017-06-17 01:11:02 +03:00
|
|
|
// TODO(roasbeef): check payment filter to see if already used?
|
|
|
|
|
2017-04-12 07:24:16 +03:00
|
|
|
// In order to limit the level of concurrency and prevent a client from
|
|
|
|
// attempting to OOM the server, we'll set up a semaphore to create an
|
|
|
|
// upper ceiling on the number of outstanding payments.
|
|
|
|
const numOutstandingPayments = 2000
|
|
|
|
htlcSema := make(chan struct{}, numOutstandingPayments)
|
|
|
|
for i := 0; i < numOutstandingPayments; i++ {
|
|
|
|
htlcSema <- struct{}{}
|
|
|
|
}
|
|
|
|
|
2016-10-27 01:05:10 +03:00
|
|
|
// Launch a new goroutine to handle reading new payment requests from
|
|
|
|
// the client. This way we can handle errors independently of blocking
|
|
|
|
// and waiting for the next payment request to come through.
|
2017-08-22 10:24:37 +03:00
|
|
|
reqQuit := make(chan struct{})
|
|
|
|
defer func() {
|
|
|
|
close(reqQuit)
|
|
|
|
}()
|
2018-10-15 22:13:37 +03:00
|
|
|
|
|
|
|
// TODO(joostjager): Callers expect result to come in in the same order
|
|
|
|
// as the request were sent, but this is far from guarantueed in the
|
|
|
|
// code below.
|
2016-10-27 01:05:10 +03:00
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
2017-08-22 10:24:37 +03:00
|
|
|
case <-reqQuit:
|
|
|
|
return
|
2016-10-27 01:05:10 +03:00
|
|
|
case <-r.quit:
|
|
|
|
errChan <- nil
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
// Receive the next pending payment within the
|
|
|
|
// stream sent by the client. If we read the
|
|
|
|
// EOF sentinel, then the client has closed the
|
|
|
|
// stream, and we can exit normally.
|
2018-05-01 11:17:55 +03:00
|
|
|
nextPayment, err := stream.recv()
|
2016-10-27 01:05:10 +03:00
|
|
|
if err == io.EOF {
|
|
|
|
errChan <- nil
|
|
|
|
return
|
|
|
|
} else if err != nil {
|
2017-08-22 10:24:37 +03:00
|
|
|
select {
|
|
|
|
case errChan <- err:
|
|
|
|
case <-reqQuit:
|
|
|
|
return
|
|
|
|
}
|
2016-10-27 01:05:10 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// Populate the next payment, either from the
|
|
|
|
// payment request, or from the explicitly set
|
2018-06-07 06:40:28 +03:00
|
|
|
// fields. If the payment proto wasn't well
|
|
|
|
// formed, then we'll send an error reply and
|
|
|
|
// wait for the next payment.
|
|
|
|
payIntent, err := extractPaymentIntent(nextPayment)
|
|
|
|
if err != nil {
|
|
|
|
if err := stream.send(&lnrpc.SendResponse{
|
|
|
|
PaymentError: err.Error(),
|
2018-12-05 09:57:44 +03:00
|
|
|
PaymentHash: payIntent.rHash[:],
|
2018-06-07 06:40:28 +03:00
|
|
|
}); err != nil {
|
|
|
|
select {
|
|
|
|
case errChan <- err:
|
|
|
|
case <-reqQuit:
|
2018-05-01 11:17:55 +03:00
|
|
|
return
|
2017-09-05 19:08:02 +03:00
|
|
|
}
|
|
|
|
}
|
2018-06-07 06:40:28 +03:00
|
|
|
continue
|
2017-01-03 02:36:15 +03:00
|
|
|
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
// If the payment was well formed, then we'll
|
|
|
|
// send to the dispatch goroutine, or exit,
|
|
|
|
// which ever comes first
|
2017-08-22 10:24:37 +03:00
|
|
|
select {
|
2018-06-07 06:40:28 +03:00
|
|
|
case payChan <- &payIntent:
|
2017-08-22 10:24:37 +03:00
|
|
|
case <-reqQuit:
|
|
|
|
return
|
|
|
|
}
|
2016-10-27 01:05:10 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-09-21 03:15:26 +03:00
|
|
|
|
2016-07-13 03:46:25 +03:00
|
|
|
for {
|
2016-07-22 02:22:30 +03:00
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
2016-07-13 03:46:25 +03:00
|
|
|
return err
|
2016-12-27 08:51:18 +03:00
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
case payIntent := <-payChan:
|
2016-11-11 04:37:21 +03:00
|
|
|
// We launch a new goroutine to execute the current
|
|
|
|
// payment so we can continue to serve requests while
|
2016-12-27 08:51:18 +03:00
|
|
|
// this payment is being dispatched.
|
2016-07-22 02:22:30 +03:00
|
|
|
go func() {
|
2017-04-12 07:24:16 +03:00
|
|
|
// Attempt to grab a free semaphore slot, using
|
|
|
|
// a defer to eventually release the slot
|
|
|
|
// regardless of payment success.
|
|
|
|
<-htlcSema
|
|
|
|
defer func() {
|
|
|
|
htlcSema <- struct{}{}
|
|
|
|
}()
|
|
|
|
|
2018-07-31 11:29:12 +03:00
|
|
|
resp, saveErr := r.dispatchPaymentIntent(
|
2018-06-07 06:40:28 +03:00
|
|
|
payIntent,
|
2018-05-01 11:17:55 +03:00
|
|
|
)
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
switch {
|
2018-07-31 11:29:12 +03:00
|
|
|
// If we were unable to save the state of the
|
|
|
|
// payment, then we'll return the error to the
|
|
|
|
// user, and terminate.
|
|
|
|
case saveErr != nil:
|
|
|
|
errChan <- saveErr
|
|
|
|
return
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
// If we receive payment error than, instead of
|
|
|
|
// terminating the stream, send error response
|
|
|
|
// to the user.
|
2018-07-31 11:29:12 +03:00
|
|
|
case resp.Err != nil:
|
2018-05-01 11:17:55 +03:00
|
|
|
err := stream.send(&lnrpc.SendResponse{
|
2018-07-31 11:29:12 +03:00
|
|
|
PaymentError: resp.Err.Error(),
|
2018-12-05 09:57:44 +03:00
|
|
|
PaymentHash: payIntent.rHash[:],
|
2017-05-19 15:18:21 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
errChan <- err
|
|
|
|
}
|
2016-07-22 02:22:30 +03:00
|
|
|
return
|
2016-12-27 08:51:18 +03:00
|
|
|
}
|
2016-12-21 12:19:01 +03:00
|
|
|
|
2019-03-14 16:19:35 +03:00
|
|
|
marshalledRouted := r.routerBackend.
|
|
|
|
MarshallRoute(resp.Route)
|
|
|
|
|
2018-05-01 11:17:55 +03:00
|
|
|
err := stream.send(&lnrpc.SendResponse{
|
2018-12-05 09:57:44 +03:00
|
|
|
PaymentHash: payIntent.rHash[:],
|
2018-07-31 11:29:12 +03:00
|
|
|
PaymentPreimage: resp.Preimage[:],
|
|
|
|
PaymentRoute: marshalledRouted,
|
2017-02-02 05:29:46 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
2016-07-22 02:22:30 +03:00
|
|
|
errChan <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}()
|
2016-07-13 03:46:25 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
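// The htlcSema used above is a counting semaphore built from a buffered
// channel. A self-contained sketch of the same pattern (worker count and
// job loop are illustrative only):
//
//	sema := make(chan struct{}, 3) // allow at most 3 concurrent workers
//	for i := 0; i < 3; i++ {
//		sema <- struct{}{}
//	}
//	for job := 0; job < 10; job++ {
//		<-sema // acquire a slot, blocking if all 3 are taken
//		go func(j int) {
//			// Release the slot when the worker finishes.
//			defer func() { sema <- struct{}{} }()
//			// ... dispatch payment j ...
//		}(job)
//	}
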
// SendPaymentSync is the synchronous non-streaming version of SendPayment.
// This RPC is intended to be consumed by clients of the REST proxy.
// Additionally, this RPC expects the destination's public key and the payment
// hash (if any) to be encoded as hex strings.
func (r *rpcServer) SendPaymentSync(ctx context.Context,
	nextPayment *lnrpc.SendRequest) (*lnrpc.SendResponse, error) {

	return r.sendPaymentSync(ctx, &rpcPaymentRequest{
		SendRequest: nextPayment,
	})
}

// SendToRouteSync is the synchronous non-streaming version of SendToRoute.
// This RPC is intended to be consumed by clients of the REST proxy.
// Additionally, this RPC expects the payment hash (if any) to be encoded as a
// hex string.
func (r *rpcServer) SendToRouteSync(ctx context.Context,
	req *lnrpc.SendToRouteRequest) (*lnrpc.SendResponse, error) {

	if req.Route == nil {
		return nil, fmt.Errorf("unable to send, no route provided")
	}

	paymentRequest, err := r.unmarshallSendToRouteRequest(req)
	if err != nil {
		return nil, err
	}

	return r.sendPaymentSync(ctx, paymentRequest)
}

// sendPaymentSync is the synchronous variant of sendPayment. It will block and
// wait until the payment has been fully completed.
func (r *rpcServer) sendPaymentSync(ctx context.Context,
	nextPayment *rpcPaymentRequest) (*lnrpc.SendResponse, error) {

	// We don't allow payments to be sent while the daemon itself is still
	// syncing as we may be trying to send a payment over a "stale"
	// channel.
	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// First we'll attempt to map the proto describing the next payment to
	// an intent that we can pass to local sub-systems.
	payIntent, err := extractPaymentIntent(nextPayment)
	if err != nil {
		return nil, err
	}

	// With the payment validated, we'll now attempt to dispatch the
	// payment.
	resp, saveErr := r.dispatchPaymentIntent(&payIntent)
	switch {
	case saveErr != nil:
		return nil, saveErr

	case resp.Err != nil:
		return &lnrpc.SendResponse{
			PaymentError: resp.Err.Error(),
			PaymentHash:  payIntent.rHash[:],
		}, nil
	}

	return &lnrpc.SendResponse{
		PaymentHash:     payIntent.rHash[:],
		PaymentPreimage: resp.Preimage[:],
		PaymentRoute:    r.routerBackend.MarshallRoute(resp.Route),
	}, nil
}

// AddInvoice attempts to add a new invoice to the invoice database. Any
// duplicated invoices are rejected, therefore all invoices *must* have a
// unique payment preimage.
func (r *rpcServer) AddInvoice(ctx context.Context,
	invoice *lnrpc.Invoice) (*lnrpc.AddInvoiceResponse, error) {

	defaultDelta := cfg.Bitcoin.TimeLockDelta
	if registeredChains.PrimaryChain() == litecoinChain {
		defaultDelta = cfg.Litecoin.TimeLockDelta
	}

	addInvoiceCfg := &invoicesrpc.AddInvoiceConfig{
		AddInvoice:        r.server.invoices.AddInvoice,
		IsChannelActive:   r.server.htlcSwitch.HasActiveLink,
		ChainParams:       activeNetParams.Params,
		NodeSigner:        r.server.nodeSigner,
		MaxPaymentMSat:    MaxPaymentMSat,
		DefaultCLTVExpiry: defaultDelta,
		ChanDB:            r.server.chanDB,
	}

	addInvoiceData := &invoicesrpc.AddInvoiceData{
		Memo:            invoice.Memo,
		Receipt:         invoice.Receipt,
		Value:           btcutil.Amount(invoice.Value),
		DescriptionHash: invoice.DescriptionHash,
		Expiry:          invoice.Expiry,
		FallbackAddr:    invoice.FallbackAddr,
		CltvExpiry:      invoice.CltvExpiry,
		Private:         invoice.Private,
	}

	if invoice.RPreimage != nil {
		preimage, err := lntypes.MakePreimage(invoice.RPreimage)
		if err != nil {
			return nil, err
		}
		addInvoiceData.Preimage = &preimage
	}

	hash, dbInvoice, err := invoicesrpc.AddInvoice(
		ctx, addInvoiceCfg, addInvoiceData,
	)
	if err != nil {
		return nil, err
	}

	return &lnrpc.AddInvoiceResponse{
		AddIndex:       dbInvoice.AddIndex,
		PaymentRequest: string(dbInvoice.PaymentRequest),
		RHash:          hash[:],
	}, nil
}

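// A hedged client-side sketch for the endpoint above, using the generated
// lnrpc stubs (TLS, macaroon auth, and connection setup omitted; conn is
// assumed to be a ready *grpc.ClientConn):
//
//	client := lnrpc.NewLightningClient(conn)
//	resp, err := client.AddInvoice(context.Background(), &lnrpc.Invoice{
//		Memo:  "coffee",
//		Value: 1000, // amount in satoshis
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("encoded invoice:", resp.PaymentRequest)
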
// LookupInvoice attempts to look up an invoice according to its payment hash.
// The passed payment hash *must* be exactly 32 bytes; if not, an error is
// returned.
func (r *rpcServer) LookupInvoice(ctx context.Context,
	req *lnrpc.PaymentHash) (*lnrpc.Invoice, error) {

	var (
		payHash [32]byte
		rHash   []byte
		err     error
	)

	// If the RHash as a raw string was provided, then decode that and use
	// it directly. Otherwise, we use the raw bytes provided.
	if req.RHashStr != "" {
		rHash, err = hex.DecodeString(req.RHashStr)
		if err != nil {
			return nil, err
		}
	} else {
		rHash = req.RHash
	}

	// Ensure that the payment hash is *exactly* 32 bytes.
	if len(rHash) != 0 && len(rHash) != 32 {
		return nil, fmt.Errorf("payment hash must be exactly "+
			"32 bytes, is instead %v", len(rHash))
	}
	copy(payHash[:], rHash)

	rpcsLog.Tracef("[lookupinvoice] searching for invoice %x", payHash[:])

	invoice, _, err := r.server.invoices.LookupInvoice(payHash)
	if err != nil {
		return nil, err
	}

	rpcsLog.Tracef("[lookupinvoice] located invoice %v",
		newLogClosure(func() string {
			return spew.Sdump(invoice)
		}))

	rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
		&invoice, activeNetParams.Params,
	)
	if err != nil {
		return nil, err
	}

	return rpcInvoice, nil
}

// ListInvoices returns a list of all the invoices currently stored within the
// database. Any active debug invoices are ignored.
func (r *rpcServer) ListInvoices(ctx context.Context,
	req *lnrpc.ListInvoiceRequest) (*lnrpc.ListInvoiceResponse, error) {

	// If the number of invoices was not specified, then we'll default to
	// returning the latest 100 invoices.
	if req.NumMaxInvoices == 0 {
		req.NumMaxInvoices = 100
	}

	// Next, we'll map the proto request into a format that is understood
	// by the database.
	q := channeldb.InvoiceQuery{
		IndexOffset:    req.IndexOffset,
		NumMaxInvoices: req.NumMaxInvoices,
		PendingOnly:    req.PendingOnly,
		Reversed:       req.Reversed,
	}
	invoiceSlice, err := r.server.chanDB.QueryInvoices(q)
	if err != nil {
		return nil, fmt.Errorf("unable to query invoices: %v", err)
	}

	// Before returning the response, we'll need to convert each invoice
	// into its proto representation.
	resp := &lnrpc.ListInvoiceResponse{
		Invoices:         make([]*lnrpc.Invoice, len(invoiceSlice.Invoices)),
		FirstIndexOffset: invoiceSlice.FirstIndexOffset,
		LastIndexOffset:  invoiceSlice.LastIndexOffset,
	}
	for i, invoice := range invoiceSlice.Invoices {
		resp.Invoices[i], err = invoicesrpc.CreateRPCInvoice(
			&invoice, activeNetParams.Params,
		)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}

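// The index offsets in the response above enable cursor-style pagination.
// A hedged client sketch (client is assumed to be a ready
// lnrpc.LightningClient):
//
//	var offset uint64
//	for {
//		resp, err := client.ListInvoices(ctx, &lnrpc.ListInvoiceRequest{
//			IndexOffset:    offset,
//			NumMaxInvoices: 100,
//		})
//		if err != nil || len(resp.Invoices) == 0 {
//			break
//		}
//		// ... process resp.Invoices ...
//		offset = resp.LastIndexOffset // resume where this page ended
//	}
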
// SubscribeInvoices returns a uni-directional stream (server -> client) for
// notifying the client of newly added/settled invoices.
func (r *rpcServer) SubscribeInvoices(req *lnrpc.InvoiceSubscription,
	updateStream lnrpc.Lightning_SubscribeInvoicesServer) error {

	invoiceClient := r.server.invoices.SubscribeNotifications(
		req.AddIndex, req.SettleIndex,
	)
	defer invoiceClient.Cancel()

	for {
		select {
		case newInvoice := <-invoiceClient.NewInvoices:
			rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
				newInvoice, activeNetParams.Params,
			)
			if err != nil {
				return err
			}

			if err := updateStream.Send(rpcInvoice); err != nil {
				return err
			}

		case settledInvoice := <-invoiceClient.SettledInvoices:
			rpcInvoice, err := invoicesrpc.CreateRPCInvoice(
				settledInvoice, activeNetParams.Params,
			)
			if err != nil {
				return err
			}

			if err := updateStream.Send(rpcInvoice); err != nil {
				return err
			}

		case <-r.quit:
			return nil
		}
	}
}

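// The AddIndex/SettleIndex parameters above let a client replay any backlog
// it missed while disconnected. A hedged client sketch (client is assumed
// to be a ready lnrpc.LightningClient; lastAdd/lastSettle are counters the
// client persists between sessions):
//
//	stream, err := client.SubscribeInvoices(ctx, &lnrpc.InvoiceSubscription{
//		AddIndex:    lastAdd,
//		SettleIndex: lastSettle,
//	})
//	if err != nil {
//		return err
//	}
//	for {
//		inv, err := stream.Recv()
//		if err != nil {
//			return err
//		}
//		lastAdd, lastSettle = inv.AddIndex, inv.SettleIndex
//	}
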
// SubscribeTransactions creates a uni-directional stream (server -> client) in
// which any newly discovered transactions relevant to the wallet are sent
// over.
func (r *rpcServer) SubscribeTransactions(req *lnrpc.GetTransactionsRequest,
	updateStream lnrpc.Lightning_SubscribeTransactionsServer) error {

	txClient, err := r.server.cc.wallet.SubscribeTransactions()
	if err != nil {
		return err
	}
	defer txClient.Cancel()

	for {
		select {
		case tx := <-txClient.ConfirmedTransactions():
			destAddresses := make([]string, 0, len(tx.DestAddresses))
			for _, destAddress := range tx.DestAddresses {
				destAddresses = append(destAddresses, destAddress.EncodeAddress())
			}
			detail := &lnrpc.Transaction{
				TxHash:           tx.Hash.String(),
				Amount:           int64(tx.Value),
				NumConfirmations: tx.NumConfirmations,
				BlockHash:        tx.BlockHash.String(),
				TimeStamp:        tx.Timestamp,
				TotalFees:        tx.TotalFees,
				DestAddresses:    destAddresses,
				RawTxHex:         hex.EncodeToString(tx.RawTx),
			}
			if err := updateStream.Send(detail); err != nil {
				return err
			}

		case tx := <-txClient.UnconfirmedTransactions():
			var destAddresses []string
			for _, destAddress := range tx.DestAddresses {
				destAddresses = append(destAddresses, destAddress.EncodeAddress())
			}
			detail := &lnrpc.Transaction{
				TxHash:        tx.Hash.String(),
				Amount:        int64(tx.Value),
				TimeStamp:     tx.Timestamp,
				TotalFees:     tx.TotalFees,
				DestAddresses: destAddresses,
				RawTxHex:      hex.EncodeToString(tx.RawTx),
			}
			if err := updateStream.Send(detail); err != nil {
				return err
			}

		case <-r.quit:
			return nil
		}
	}
}

// GetTransactions returns a list describing all the known transactions
// relevant to the wallet.
func (r *rpcServer) GetTransactions(ctx context.Context,
	_ *lnrpc.GetTransactionsRequest) (*lnrpc.TransactionDetails, error) {

	// TODO(roasbeef): add pagination support
	transactions, err := r.server.cc.wallet.ListTransactionDetails()
	if err != nil {
		return nil, err
	}

	txDetails := &lnrpc.TransactionDetails{
		Transactions: make([]*lnrpc.Transaction, len(transactions)),
	}
	for i, tx := range transactions {
		var destAddresses []string
		for _, destAddress := range tx.DestAddresses {
			destAddresses = append(destAddresses, destAddress.EncodeAddress())
		}

		// We also get unconfirmed transactions, so BlockHash can be
		// nil.
		blockHash := ""
		if tx.BlockHash != nil {
			blockHash = tx.BlockHash.String()
		}

		txDetails.Transactions[i] = &lnrpc.Transaction{
			TxHash:           tx.Hash.String(),
			Amount:           int64(tx.Value),
			NumConfirmations: tx.NumConfirmations,
			BlockHash:        blockHash,
			BlockHeight:      tx.BlockHeight,
			TimeStamp:        tx.Timestamp,
			TotalFees:        tx.TotalFees,
			DestAddresses:    destAddresses,
			RawTxHex:         hex.EncodeToString(tx.RawTx),
		}
	}

	return txDetails, nil
}

// DescribeGraph returns a description of the latest graph state from the PoV
// of the node. The graph information is partitioned into two components: all
// the nodes/vertexes, and all the edges that connect the vertexes themselves.
// As this is a directed graph, the edges also contain the node directional
// specific routing policy which includes: the time lock delta, fee
// information, etc.
func (r *rpcServer) DescribeGraph(ctx context.Context,
	req *lnrpc.ChannelGraphRequest) (*lnrpc.ChannelGraph, error) {

	resp := &lnrpc.ChannelGraph{}
	includeUnannounced := req.IncludeUnannounced

	// Obtain the pointer to the global singleton channel graph, this will
	// provide a consistent view of the graph due to bolt db's
	// transactional model.
	graph := r.server.chanDB.ChannelGraph()

	// First iterate through all the known nodes (connected or unconnected
	// within the graph), collating their current state into the RPC
	// response.
	err := graph.ForEachNode(nil, func(_ *bbolt.Tx, node *channeldb.LightningNode) error {
		nodeAddrs := make([]*lnrpc.NodeAddress, 0)
		for _, addr := range node.Addresses {
			nodeAddr := &lnrpc.NodeAddress{
				Network: addr.Network(),
				Addr:    addr.String(),
			}
			nodeAddrs = append(nodeAddrs, nodeAddr)
		}

		resp.Nodes = append(resp.Nodes, &lnrpc.LightningNode{
			LastUpdate: uint32(node.LastUpdate.Unix()),
			PubKey:     hex.EncodeToString(node.PubKeyBytes[:]),
			Addresses:  nodeAddrs,
			Alias:      node.Alias,
			Color:      routing.EncodeHexColor(node.Color),
		})

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Next, for each active channel we know of within the graph, create a
	// similar response which details both the edge information as well as
	// the routing policies of the nodes connecting the two edges.
	err = graph.ForEachChannel(func(edgeInfo *channeldb.ChannelEdgeInfo,
		c1, c2 *channeldb.ChannelEdgePolicy) error {

		// Do not include unannounced channels unless specifically
		// requested. Unannounced channels include both private channels
		// as well as public channels whose authentication proofs were
		// not confirmed yet, and hence were not announced.
		if !includeUnannounced && edgeInfo.AuthProof == nil {
			return nil
		}

		edge := marshalDbEdge(edgeInfo, c1, c2)
		resp.Edges = append(resp.Edges, edge)

		return nil
	})
	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
		return nil, err
	}

	return resp, nil
}

func marshalDbEdge(edgeInfo *channeldb.ChannelEdgeInfo,
	c1, c2 *channeldb.ChannelEdgePolicy) *lnrpc.ChannelEdge {

	var lastUpdate int64

	if c2 != nil {
		lastUpdate = c2.LastUpdate.Unix()
	}
	if c1 != nil {
		lastUpdate = c1.LastUpdate.Unix()
	}

	edge := &lnrpc.ChannelEdge{
		ChannelId: edgeInfo.ChannelID,
		ChanPoint: edgeInfo.ChannelPoint.String(),
		// TODO(roasbeef): update should be on edge info itself
		LastUpdate: uint32(lastUpdate),
		Node1Pub:   hex.EncodeToString(edgeInfo.NodeKey1Bytes[:]),
		Node2Pub:   hex.EncodeToString(edgeInfo.NodeKey2Bytes[:]),
		Capacity:   int64(edgeInfo.Capacity),
	}

	if c1 != nil {
		edge.Node1Policy = &lnrpc.RoutingPolicy{
			TimeLockDelta:    uint32(c1.TimeLockDelta),
			MinHtlc:          int64(c1.MinHTLC),
			MaxHtlcMsat:      uint64(c1.MaxHTLC),
			FeeBaseMsat:      int64(c1.FeeBaseMSat),
			FeeRateMilliMsat: int64(c1.FeeProportionalMillionths),
			Disabled:         c1.ChannelFlags&lnwire.ChanUpdateDisabled != 0,
			LastUpdate:       uint32(c1.LastUpdate.Unix()),
		}
	}

	if c2 != nil {
		edge.Node2Policy = &lnrpc.RoutingPolicy{
			TimeLockDelta:    uint32(c2.TimeLockDelta),
			MinHtlc:          int64(c2.MinHTLC),
			MaxHtlcMsat:      uint64(c2.MaxHTLC),
			FeeBaseMsat:      int64(c2.FeeBaseMSat),
			FeeRateMilliMsat: int64(c2.FeeProportionalMillionths),
			Disabled:         c2.ChannelFlags&lnwire.ChanUpdateDisabled != 0,
			LastUpdate:       uint32(c2.LastUpdate.Unix()),
		}
	}

	return edge
}

// GetChanInfo returns the latest authenticated network announcement for the
// given channel identified by its channel ID: an 8-byte integer which uniquely
// identifies the location of the transaction's funding output within the
// block chain.
func (r *rpcServer) GetChanInfo(ctx context.Context,
	in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) {

	graph := r.server.chanDB.ChannelGraph()

	edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId)
	if err != nil {
		return nil, err
	}

	// Convert the database's edge format into the network/RPC edge format
	// which couples the edge itself along with the directional node
	// routing policies of each node involved within the channel.
	channelEdge := marshalDbEdge(edgeInfo, edge1, edge2)

	return channelEdge, nil
}

// GetNodeInfo returns the latest advertised and aggregate authenticated
// channel information for the specified node identified by its public key.
func (r *rpcServer) GetNodeInfo(ctx context.Context,
	in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) {

	graph := r.server.chanDB.ChannelGraph()

	// First, parse the hex-encoded public key into a full in-memory public
	// key object we can work with for querying.
	pubKeyBytes, err := hex.DecodeString(in.PubKey)
	if err != nil {
		return nil, err
	}
	pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
	if err != nil {
		return nil, err
	}

	// With the public key decoded, attempt to fetch the node corresponding
	// to this public key. If the node cannot be found, then an error will
	// be returned.
	node, err := graph.FetchLightningNode(pubKey)
	if err != nil {
		return nil, err
	}

	// With the node obtained, we'll now iterate through all its outgoing
	// edges to gather some basic statistics about its outgoing channels.
	var (
		numChannels   uint32
		totalCapacity btcutil.Amount
		channels      []*lnrpc.ChannelEdge
	)

	if err := node.ForEachChannel(nil, func(_ *bbolt.Tx,
		edge *channeldb.ChannelEdgeInfo,
		c1, c2 *channeldb.ChannelEdgePolicy) error {

		numChannels++
		totalCapacity += edge.Capacity

		// Only populate the node's channels if the user requested
		// them.
		if in.IncludeChannels {
			// Do not include unannounced channels - private
			// channels or public channels whose authentication
			// proofs were not confirmed yet.
			if edge.AuthProof == nil {
				return nil
			}

			// Convert the database's edge format into the
			// network/RPC edge format.
			channelEdge := marshalDbEdge(edge, c1, c2)
			channels = append(channels, channelEdge)
		}

		return nil
	}); err != nil {
		return nil, err
	}

	nodeAddrs := make([]*lnrpc.NodeAddress, 0)
	for _, addr := range node.Addresses {
		nodeAddr := &lnrpc.NodeAddress{
			Network: addr.Network(),
			Addr:    addr.String(),
		}
		nodeAddrs = append(nodeAddrs, nodeAddr)
	}

	return &lnrpc.NodeInfo{
		Node: &lnrpc.LightningNode{
			LastUpdate: uint32(node.LastUpdate.Unix()),
			PubKey:     in.PubKey,
			Addresses:  nodeAddrs,
			Alias:      node.Alias,
			Color:      routing.EncodeHexColor(node.Color),
		},
		NumChannels:   numChannels,
		TotalCapacity: int64(totalCapacity),
		Channels:      channels,
	}, nil
}

// QueryRoutes attempts to query the daemon's Channel Router for a possible
// route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The returned route contains the full
// details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsulated
// within the HTLC.
//
// TODO(roasbeef): should return a slice of routes in reality
//  * create separate PR to send based on well formatted route
func (r *rpcServer) QueryRoutes(ctx context.Context,
	in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {

	return r.routerBackend.QueryRoutes(ctx, in)
}

// GetNetworkInfo returns some basic stats about the known channel graph from
// the PoV of the node.
func (r *rpcServer) GetNetworkInfo(ctx context.Context,
	_ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) {

	graph := r.server.chanDB.ChannelGraph()

	var (
		numNodes             uint32
		numChannels          uint32
		maxChanOut           uint32
		totalNetworkCapacity btcutil.Amount
		minChannelSize       btcutil.Amount = math.MaxInt64
		maxChannelSize       btcutil.Amount
		medianChanSize       btcutil.Amount
	)

	// We'll use this map to de-duplicate channels during our traversal.
	// This is needed since channels are directional, so there will be two
	// edges for each channel within the graph.
	seenChans := make(map[uint64]struct{})

	// We also keep a list of all encountered capacities, in order to
	// calculate the median channel size.
	var allChans []btcutil.Amount

	// We'll run through all the known nodes within our view of the
	// network, tallying up the total number of nodes, and also gathering
	// each node so we can measure the graph diameter and degree stats
	// below.
	if err := graph.ForEachNode(nil, func(tx *bbolt.Tx, node *channeldb.LightningNode) error {
		// Increment the total number of nodes with each iteration.
		numNodes++

		// For each channel we'll compute the out degree of each node,
		// and also update our running tallies of the min/max channel
		// capacity, as well as the total channel capacity. We pass
		// through the db transaction from the outer view so we can
		// re-use it within this inner view.
		var outDegree uint32
		if err := node.ForEachChannel(tx, func(_ *bbolt.Tx,
			edge *channeldb.ChannelEdgeInfo, _, _ *channeldb.ChannelEdgePolicy) error {

			// Bump up the out degree for this node for each
			// channel encountered.
			outDegree++

			// If we've already seen this channel, then we'll
			// return early to ensure that we don't double-count
			// stats.
			if _, ok := seenChans[edge.ChannelID]; ok {
				return nil
			}

			// Compare the capacity of this channel against the
			// running min/max to see if we should update the
			// extrema.
			chanCapacity := edge.Capacity
			if chanCapacity < minChannelSize {
				minChannelSize = chanCapacity
			}
			if chanCapacity > maxChannelSize {
				maxChannelSize = chanCapacity
			}

			// Accumulate the total capacity of this channel into
			// the network-wide capacity.
			totalNetworkCapacity += chanCapacity

			numChannels++

			seenChans[edge.ChannelID] = struct{}{}
			allChans = append(allChans, edge.Capacity)
			return nil
		}); err != nil {
			return err
		}

		// Finally, if the out degree of this node is greater than what
		// we've seen so far, update the maxChanOut variable.
		if outDegree > maxChanOut {
			maxChanOut = outDegree
		}

		return nil
	}); err != nil {
		return nil, err
	}

	// Query the graph for the current number of zombie channels.
	numZombies, err := graph.NumZombies()
	if err != nil {
		return nil, err
	}

	// Find the median.
	medianChanSize = autopilot.Median(allChans)

	// If we don't have any channels, then reset the minChannelSize to zero
	// to avoid outputting NaN in encoded JSON.
	if numChannels == 0 {
		minChannelSize = 0
	}

	// TODO(roasbeef): graph diameter

	// TODO(roasbeef): also add oldest channel?
	netInfo := &lnrpc.NetworkInfo{
		MaxOutDegree:         maxChanOut,
		AvgOutDegree:         float64(2*numChannels) / float64(numNodes),
		NumNodes:             numNodes,
		NumChannels:          numChannels,
		TotalNetworkCapacity: int64(totalNetworkCapacity),
		AvgChannelSize:       float64(totalNetworkCapacity) / float64(numChannels),

		MinChannelSize:       int64(minChannelSize),
		MaxChannelSize:       int64(maxChannelSize),
		MedianChannelSizeSat: int64(medianChanSize),
		NumZombieChans:       numZombies,
	}

	// Similarly, if we don't have any channels, then we'll also set the
	// average channel size to zero in order to avoid weird JSON encoding
	// outputs.
	if numChannels == 0 {
		netInfo.AvgChannelSize = 0
	}

	return netInfo, nil
}

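// A quick consistency check on the AvgOutDegree figure above: each channel
// contributes one outgoing edge to each of its two endpoints, hence the
// factor of two. For example, 4 nodes fully connected pairwise yield
// numChannels = 6, so AvgOutDegree = 2*6/4 = 3, matching every node holding
// exactly 3 channels.
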
// StopDaemon will send a shutdown request to the interrupt handler, triggering
// a graceful shutdown of the daemon.
func (r *rpcServer) StopDaemon(ctx context.Context,
	_ *lnrpc.StopRequest) (*lnrpc.StopResponse, error) {

	signal.RequestShutdown()
	return &lnrpc.StopResponse{}, nil
}

// SubscribeChannelGraph launches a streaming RPC that allows the caller to
// receive notifications upon any changes to the channel graph topology from
// the point of view of the responding node. Events notified include: new
// nodes coming online, nodes updating their authenticated attributes, new
// channels being advertised, updates in the routing policy for a directional
// channel edge, and finally when prior channels are closed on-chain.
func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
	updateStream lnrpc.Lightning_SubscribeChannelGraphServer) error {

	// First, we start by subscribing to a new intent to receive
	// notifications from the channel router.
	client, err := r.server.chanRouter.SubscribeTopology()
	if err != nil {
		return err
	}

	// Ensure that the resources for the topology update client are
	// cleaned up once either the server or the client exits.
	defer client.Cancel()

	for {
		select {

		// A new update has been sent by the channel router, we'll
		// marshal it into the form expected by the gRPC client, then
		// send it off.
		case topChange, ok := <-client.TopologyChanges:
			// If the second value from the channel read is false,
			// then this means that the channel router is exiting
			// or the notification client was cancelled. So we'll
			// exit early.
			if !ok {
				return errors.New("server shutting down")
			}

			// Convert the struct from the channel router into the
			// form expected by the gRPC service then send it off
			// to the client.
			graphUpdate := marshallTopologyChange(topChange)
			if err := updateStream.Send(graphUpdate); err != nil {
				return err
			}

		// The server is quitting, so we'll exit immediately. Returning
		// nil will close the client's read end of the stream.
		case <-r.quit:
			return nil
		}
	}
}

// marshallTopologyChange performs a mapping from the topology change struct
// returned by the router to the form of notifications expected by the current
// gRPC service.
func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopologyUpdate {

	// encodeKey is a simple helper function that converts a live public
	// key into a hex-encoded version of the compressed serialization for
	// the public key.
	encodeKey := func(k *btcec.PublicKey) string {
		return hex.EncodeToString(k.SerializeCompressed())
	}

	nodeUpdates := make([]*lnrpc.NodeUpdate, len(topChange.NodeUpdates))
	for i, nodeUpdate := range topChange.NodeUpdates {
		addrs := make([]string, len(nodeUpdate.Addresses))
		for i, addr := range nodeUpdate.Addresses {
			addrs[i] = addr.String()
		}

		nodeUpdates[i] = &lnrpc.NodeUpdate{
			Addresses:      addrs,
			IdentityKey:    encodeKey(nodeUpdate.IdentityKey),
			GlobalFeatures: nodeUpdate.GlobalFeatures,
			Alias:          nodeUpdate.Alias,
			Color:          nodeUpdate.Color,
		}
	}

	channelUpdates := make([]*lnrpc.ChannelEdgeUpdate, len(topChange.ChannelEdgeUpdates))
	for i, channelUpdate := range topChange.ChannelEdgeUpdates {
		channelUpdates[i] = &lnrpc.ChannelEdgeUpdate{
			ChanId: channelUpdate.ChanID,
			ChanPoint: &lnrpc.ChannelPoint{
				FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
					FundingTxidBytes: channelUpdate.ChanPoint.Hash[:],
				},
				OutputIndex: channelUpdate.ChanPoint.Index,
			},
			Capacity: int64(channelUpdate.Capacity),
			RoutingPolicy: &lnrpc.RoutingPolicy{
				TimeLockDelta:    uint32(channelUpdate.TimeLockDelta),
				MinHtlc:          int64(channelUpdate.MinHTLC),
				MaxHtlcMsat:      uint64(channelUpdate.MaxHTLC),
				FeeBaseMsat:      int64(channelUpdate.BaseFee),
				FeeRateMilliMsat: int64(channelUpdate.FeeRate),
				Disabled:         channelUpdate.Disabled,
			},
			AdvertisingNode: encodeKey(channelUpdate.AdvertisingNode),
			ConnectingNode:  encodeKey(channelUpdate.ConnectingNode),
		}
	}

	closedChans := make([]*lnrpc.ClosedChannelUpdate, len(topChange.ClosedChannels))
	for i, closedChan := range topChange.ClosedChannels {
		closedChans[i] = &lnrpc.ClosedChannelUpdate{
			ChanId:       closedChan.ChanID,
			Capacity:     int64(closedChan.Capacity),
			ClosedHeight: closedChan.ClosedHeight,
			ChanPoint: &lnrpc.ChannelPoint{
				FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
					FundingTxidBytes: closedChan.ChanPoint.Hash[:],
				},
				OutputIndex: closedChan.ChanPoint.Index,
			},
		}
	}

	return &lnrpc.GraphTopologyUpdate{
		NodeUpdates:    nodeUpdates,
		ChannelUpdates: channelUpdates,
		ClosedChans:    closedChans,
	}
}

// ListPayments returns a list of all outgoing payments.
func (r *rpcServer) ListPayments(ctx context.Context,
	req *lnrpc.ListPaymentsRequest) (*lnrpc.ListPaymentsResponse, error) {

	rpcsLog.Debugf("[ListPayments]")

	payments, err := r.server.chanDB.FetchPayments()
	if err != nil {
		return nil, err
	}

	paymentsResp := &lnrpc.ListPaymentsResponse{}
	for _, payment := range payments {
		// To keep compatibility with the old API, we only return
		// non-succeeded payments if requested.
		if payment.Status != channeldb.StatusSucceeded &&
			!req.IncludeIncomplete {

			continue
		}

		// If a payment attempt has been made we can fetch the route.
		// Otherwise we'll just populate the RPC response with an empty
		// one.
		var route route.Route
		if payment.Attempt != nil {
			route = payment.Attempt.Route
		}
		path := make([]string, len(route.Hops))
		for i, hop := range route.Hops {
			path[i] = hex.EncodeToString(hop.PubKeyBytes[:])
		}

		// If this payment is settled, the preimage will be available.
		var preimage lntypes.Preimage
		if payment.PaymentPreimage != nil {
			preimage = *payment.PaymentPreimage
		}

		msatValue := int64(payment.Info.Value)
		satValue := int64(payment.Info.Value.ToSatoshis())

		status, err := convertPaymentStatus(payment.Status)
		if err != nil {
			return nil, err
		}

		paymentHash := payment.Info.PaymentHash
		paymentsResp.Payments = append(paymentsResp.Payments, &lnrpc.Payment{
			PaymentHash:     hex.EncodeToString(paymentHash[:]),
			Value:           satValue,
			ValueMsat:       msatValue,
			ValueSat:        satValue,
			CreationDate:    payment.Info.CreationDate.Unix(),
			Path:            path,
			Fee:             int64(route.TotalFees().ToSatoshis()),
			FeeSat:          int64(route.TotalFees().ToSatoshis()),
			FeeMsat:         int64(route.TotalFees()),
			PaymentPreimage: hex.EncodeToString(preimage[:]),
			PaymentRequest:  string(payment.Info.PaymentRequest),
			Status:          status,
		})
	}

	return paymentsResp, nil
}

// convertPaymentStatus converts a channeldb.PaymentStatus to the type expected
// by the RPC.
func convertPaymentStatus(dbStatus channeldb.PaymentStatus) (
	lnrpc.Payment_PaymentStatus, error) {

	switch dbStatus {
	case channeldb.StatusUnknown:
		return lnrpc.Payment_UNKNOWN, nil

	case channeldb.StatusInFlight:
		return lnrpc.Payment_IN_FLIGHT, nil

	case channeldb.StatusSucceeded:
		return lnrpc.Payment_SUCCEEDED, nil

	case channeldb.StatusFailed:
		return lnrpc.Payment_FAILED, nil

	default:
		return 0, fmt.Errorf("unhandled payment status %v", dbStatus)
	}
}

// DeleteAllPayments deletes all outgoing payments from DB.
func (r *rpcServer) DeleteAllPayments(ctx context.Context,
	_ *lnrpc.DeleteAllPaymentsRequest) (*lnrpc.DeleteAllPaymentsResponse, error) {

	rpcsLog.Debugf("[DeleteAllPayments]")

	if err := r.server.chanDB.DeletePayments(); err != nil {
		return nil, err
	}

	return &lnrpc.DeleteAllPaymentsResponse{}, nil
}

// DebugLevel allows a caller to programmatically set the logging verbosity of
// lnd. The logging can be targeted according to a coarse daemon-wide logging
// level, or in a granular fashion to specify the logging for a target
// sub-system.
func (r *rpcServer) DebugLevel(ctx context.Context,
	req *lnrpc.DebugLevelRequest) (*lnrpc.DebugLevelResponse, error) {

	// If show is set, then we simply print out the list of available
	// sub-systems.
	if req.Show {
		return &lnrpc.DebugLevelResponse{
			SubSystems: strings.Join(supportedSubsystems(), " "),
		}, nil
	}

	rpcsLog.Infof("[debuglevel] changing debug level to: %v", req.LevelSpec)

	// Otherwise, we'll attempt to set the logging level using the
	// specified level spec.
	if err := parseAndSetDebugLevels(req.LevelSpec); err != nil {
		return nil, err
	}

	return &lnrpc.DebugLevelResponse{}, nil
}

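// Hedged examples of level specs accepted above (the exact grammar is
// defined by parseAndSetDebugLevels): a bare level applies daemon-wide,
// while comma-separated subsystem=level pairs target individual
// sub-systems:
//
//	"debug"                // every sub-system at debug
//	"CRTR=trace,PEER=info" // per-sub-system overrides
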
// DecodePayReq takes an encoded payment request string and attempts to decode
// it, returning a full description of the conditions encoded within the
// payment request.
func (r *rpcServer) DecodePayReq(ctx context.Context,
	req *lnrpc.PayReqString) (*lnrpc.PayReq, error) {

	rpcsLog.Tracef("[decodepayreq] decoding: %v", req.PayReq)

	// First we'll attempt to decode the payment request string; if the
	// request is invalid or the checksum doesn't match, then we'll exit
	// here with an error.
	payReq, err := zpay32.Decode(req.PayReq, activeNetParams.Params)
	if err != nil {
		return nil, err
	}

	// Let the fields default to empty strings.
	desc := ""
	if payReq.Description != nil {
		desc = *payReq.Description
	}

	descHash := []byte("")
	if payReq.DescriptionHash != nil {
		descHash = payReq.DescriptionHash[:]
	}

	fallbackAddr := ""
	if payReq.FallbackAddr != nil {
		fallbackAddr = payReq.FallbackAddr.String()
	}

	// Expiry time will default to 3600 seconds if not specified
	// explicitly.
	expiry := int64(payReq.Expiry().Seconds())

	// Convert between the `lnrpc` and `routing` types.
	routeHints := invoicesrpc.CreateRPCRouteHints(payReq.RouteHints)

	amt := int64(0)
	if payReq.MilliSat != nil {
		amt = int64(payReq.MilliSat.ToSatoshis())
	}

	dest := payReq.Destination.SerializeCompressed()
	return &lnrpc.PayReq{
		Destination:     hex.EncodeToString(dest),
		PaymentHash:     hex.EncodeToString(payReq.PaymentHash[:]),
		NumSatoshis:     amt,
		Timestamp:       payReq.Timestamp.Unix(),
		Description:     desc,
		DescriptionHash: hex.EncodeToString(descHash[:]),
		FallbackAddr:    fallbackAddr,
		Expiry:          expiry,
		CltvExpiry:      int64(payReq.MinFinalCLTVExpiry()),
		RouteHints:      routeHints,
	}, nil
}

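// A minimal standalone sketch of the decode step above; zpay32.Decode is the
// same call the handler uses (invoiceStr is assumed to be a valid BOLT-11
// string for the given network):
//
//	payReq, err := zpay32.Decode(invoiceStr, &chaincfg.MainNetParams)
//	if err != nil {
//		return err
//	}
//	fmt.Printf("hash=%x amt=%v\n", payReq.PaymentHash[:], payReq.MilliSat)
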
// feeBase is the fixed point that fee rate computations are performed over.
// Nodes on the network advertise their fee rate using this point as a base.
// This means that the minimal possible fee rate is 1e-6, or 0.000001, or
// 0.0001%.
const feeBase = 1000000

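// A worked example of the fixed-point convention above: a node advertising
// FeeProportionalMillionths = 1000 charges 1000/feeBase = 1000/1e6 = 0.001,
// i.e. 0.1% of each forwarded amount, which is exactly the conversion
// FeeReport performs below.
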
// FeeReport allows the caller to obtain a report detailing the current fee
|
|
|
|
// schedule enforced by the node globally for each channel.
|
|
|
|
func (r *rpcServer) FeeReport(ctx context.Context,
|
|
|
|
_ *lnrpc.FeeReportRequest) (*lnrpc.FeeReportResponse, error) {
|
|
|
|
|
|
|
|
// TODO(roasbeef): use UnaryInterceptor to add automated logging
|
|
|
|
|
2018-02-28 09:23:09 +03:00
|
|
|
rpcsLog.Debugf("[feereport]")
|
|
|
|
|
2017-08-22 10:09:43 +03:00
|
|
|
channelGraph := r.server.chanDB.ChannelGraph()
|
|
|
|
selfNode, err := channelGraph.SourceNode()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var feeReports []*lnrpc.ChannelFeeReport
|
2018-11-30 07:04:21 +03:00
|
|
|
err = selfNode.ForEachChannel(nil, func(_ *bbolt.Tx, chanInfo *channeldb.ChannelEdgeInfo,
|
2017-08-22 10:09:43 +03:00
|
|
|
edgePolicy, _ *channeldb.ChannelEdgePolicy) error {
|
|
|
|
|
2018-06-18 13:35:22 +03:00
|
|
|
// Self node should always have policies for its channels.
|
|
|
|
if edgePolicy == nil {
|
|
|
|
return fmt.Errorf("no policy for outgoing channel %v ",
|
|
|
|
chanInfo.ChannelID)
|
|
|
|
}
|
|
|
|
|
2017-08-22 10:09:43 +03:00
|
|
|
// We'll compute the effective fee rate by converting from a
|
|
|
|
// fixed point fee rate to a floating point fee rate. The fee
|
|
|
|
// rate field in the database the amount of mSAT charged per
|
|
|
|
// 1mil mSAT sent, so will divide by this to get the proper fee
|
|
|
|
// rate.
|
|
|
|
feeRateFixedPoint := edgePolicy.FeeProportionalMillionths
|
|
|
|
feeRate := float64(feeRateFixedPoint) / float64(feeBase)
|
|
|
|
|
|
|
|
// TODO(roasbeef): also add stats for revenue for each channel
|
|
|
|
feeReports = append(feeReports, &lnrpc.ChannelFeeReport{
|
|
|
|
ChanPoint: chanInfo.ChannelPoint.String(),
|
|
|
|
BaseFeeMsat: int64(edgePolicy.FeeBaseMSat),
|
|
|
|
FeePerMil: int64(feeRateFixedPoint),
|
|
|
|
FeeRate: feeRate,
|
|
|
|
})
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-02-28 09:23:09 +03:00
|
|
|
fwdEventLog := r.server.chanDB.ForwardingLog()
|
|
|
|
|
|
|
|
// computeFeeSum is a helper function that computes the total fees for
|
|
|
|
// a particular time slice described by a forwarding event query.
|
|
|
|
computeFeeSum := func(query channeldb.ForwardingEventQuery) (lnwire.MilliSatoshi, error) {
|
|
|
|
|
|
|
|
var totalFees lnwire.MilliSatoshi
|
|
|
|
|
|
|
|
// We'll continue to fetch the next query and accumulate the
|
|
|
|
// fees until the next query returns no events.
|
|
|
|
for {
|
|
|
|
timeSlice, err := fwdEventLog.Query(query)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the timeslice is empty, then we'll return as
|
|
|
|
// we've retrieved all the entries in this range.
|
|
|
|
if len(timeSlice.ForwardingEvents) == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll tally up an accumulate the total
|
|
|
|
// fees for this time slice.
|
|
|
|
for _, event := range timeSlice.ForwardingEvents {
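				// Both amounts are lnwire.MilliSatoshi values,
				// so their difference is the fee earned on
				// this forward, denominated in mSAT.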
				fee := event.AmtIn - event.AmtOut
				totalFees += fee
			}

			// We'll now take the last offset index returned as
			// part of this response, and modify our query to start
			// at this index. This has a pagination effect in the
			// case that our query bounds have more than 100k
			// entries.
query.IndexOffset = timeSlice.LastIndexOffset
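
			// For example, with NumMaxEvents of 1,000, a range
			// holding 2,500 events is drained in three passes at
			// index offsets 0, 1,000, and 2,000; a fourth pass
			// returns an empty slice and hits the break above.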
		}

		return totalFees, nil
	}

	now := time.Now()

	// Before we perform the queries below, we'll instruct the switch to
	// flush any pending events to disk. This ensures we get a complete
	// snapshot at this particular time.
	if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
		return nil, fmt.Errorf("unable to flush forwarding "+
			"events: %v", err)
	}

	// In addition to returning the current fee schedule for each channel,
	// we'll also perform a series of queries to obtain the total fees
	// earned over the past day, week, and month.
	dayQuery := channeldb.ForwardingEventQuery{
		StartTime:    now.Add(-time.Hour * 24),
		EndTime:      now,
		NumMaxEvents: 1000,
	}
	dayFees, err := computeFeeSum(dayQuery)
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve day fees: %v", err)
	}

	weekQuery := channeldb.ForwardingEventQuery{
		StartTime:    now.Add(-time.Hour * 24 * 7),
		EndTime:      now,
		NumMaxEvents: 1000,
	}
	weekFees, err := computeFeeSum(weekQuery)
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve week fees: %v", err)
	}

	monthQuery := channeldb.ForwardingEventQuery{
		StartTime:    now.Add(-time.Hour * 24 * 30),
		EndTime:      now,
		NumMaxEvents: 1000,
	}
	monthFees, err := computeFeeSum(monthQuery)
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve month fees: %v", err)
	}

	return &lnrpc.FeeReportResponse{
		ChannelFees: feeReports,
		DayFeeSum:   uint64(dayFees.ToSatoshis()),
		WeekFeeSum:  uint64(weekFees.ToSatoshis()),
		MonthFeeSum: uint64(monthFees.ToSatoshis()),
	}, nil
}
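
// exampleFeeReport is an illustrative client-side sketch, not part of the
// original file: it assumes an already-connected lnrpc.LightningClient and
// prints the advertised fee schedule of every channel.
func exampleFeeReport(ctx context.Context, client lnrpc.LightningClient) error {
	resp, err := client.FeeReport(ctx, &lnrpc.FeeReportRequest{})
	if err != nil {
		return err
	}

	for _, report := range resp.ChannelFees {
		// FeePerMil is the fixed point rate; FeeRate is the same
		// value divided by feeBase.
		fmt.Printf("%v: base=%v msat, rate=%v ppm (%v)\n",
			report.ChanPoint, report.BaseFeeMsat,
			report.FeePerMil, report.FeeRate)
	}

	return nil
}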

// minFeeRate is the smallest permitted fee rate within the network. This is
// derived from the fact that fee rates are computed using a fixed point of
// 1,000,000. As a result, the smallest representable fee rate is 1e-6, or
// 0.000001, or 0.0001%.
const minFeeRate = 1e-6

// UpdateChannelPolicy allows the caller to update the channel forwarding policy
// for all channels globally, or a particular channel.
func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
	req *lnrpc.PolicyUpdateRequest) (*lnrpc.PolicyUpdateResponse, error) {

	var targetChans []wire.OutPoint
	switch scope := req.Scope.(type) {
	// If the request is targeting all active channels, then we don't need
	// to target any channels by their channel point.
	case *lnrpc.PolicyUpdateRequest_Global:

	// Otherwise, we're targeting an individual channel by its channel
	// point.
	case *lnrpc.PolicyUpdateRequest_ChanPoint:
		txid, err := GetChanPointFundingTxid(scope.ChanPoint)
		if err != nil {
			return nil, err
		}
		targetChans = append(targetChans, wire.OutPoint{
			Hash:  *txid,
			Index: scope.ChanPoint.OutputIndex,
		})
	default:
		return nil, fmt.Errorf("unknown scope: %v", scope)
	}

	switch {
	// As a sanity check, if the fee isn't zero, we'll ensure that the
	// passed fee rate isn't below 1e-6, the lowest allowed non-zero fee
	// rate expressible within the protocol.
	case req.FeeRate != 0 && req.FeeRate < minFeeRate:
		return nil, fmt.Errorf("fee rate of %v is too small, min fee "+
			"rate is %v", req.FeeRate, minFeeRate)

	// We'll also ensure that the user isn't setting a CLTV delta that
	// won't give outgoing HTLCs enough time to fully resolve if needed.
	case req.TimeLockDelta < minTimeLockDelta:
		return nil, fmt.Errorf("time lock delta of %v is too small, "+
			"minimum supported is %v", req.TimeLockDelta,
			minTimeLockDelta)
	}

	// We'll also need to convert the floating point fee rate we accept
	// over RPC to the fixed point rate that we use within the protocol. We
	// do this by multiplying the passed fee rate by the fee base. This
	// gives us the fixed point, scaled by 1 million, that's used within
	// the protocol.
feeRateFixed := uint32(req.FeeRate * feeBase)
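	// The conversion above maps e.g. a req.FeeRate of 0.001 (0.1%) to a
	// fixed point rate of 1,000 parts per million.
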
	baseFeeMsat := lnwire.MilliSatoshi(req.BaseFeeMsat)
	feeSchema := routing.FeeSchema{
		BaseFee: baseFeeMsat,
		FeeRate: feeRateFixed,
	}

	chanPolicy := routing.ChannelPolicy{
		FeeSchema:     feeSchema,
		TimeLockDelta: req.TimeLockDelta,
	}

	rpcsLog.Debugf("[updatechanpolicy] updating channel policy base_fee=%v, "+
		"rate_float=%v, rate_fixed=%v, time_lock_delta: %v, targets=%v",
		req.BaseFeeMsat, req.FeeRate, feeRateFixed, req.TimeLockDelta,
		spew.Sdump(targetChans))

	// With the scope resolved, we'll now send this to the
	// AuthenticatedGossiper so it can propagate the new policy for our
	// target channel(s).
	err := r.server.authGossiper.PropagateChanPolicyUpdate(
		chanPolicy, targetChans...,
	)
	if err != nil {
		return nil, err
	}

	// Finally, we'll apply the policy to the set of active links amongst
	// the target channels.
	//
	// We create a partial policy as the logic won't overwrite a valid
	// sub-policy with a "nil" one.
	p := htlcswitch.ForwardingPolicy{
		BaseFee:       baseFeeMsat,
		FeeRate:       lnwire.MilliSatoshi(feeRateFixed),
		TimeLockDelta: req.TimeLockDelta,
	}
	err = r.server.htlcSwitch.UpdateForwardingPolicies(p, targetChans...)
	if err != nil {
		// If we're unable to update the fees due to the links not
		// being online, then we don't need to fail the call. We'll
		// simply log the failure.
		rpcsLog.Warnf("Unable to update link fees: %v", err)
	}

	return &lnrpc.PolicyUpdateResponse{}, nil
}
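
// exampleGlobalPolicyUpdate is an illustrative client-side sketch, not part
// of the original file: it assumes an already-connected
// lnrpc.LightningClient and updates the fee schedule of all channels at
// once via the Global scope.
func exampleGlobalPolicyUpdate(ctx context.Context,
	client lnrpc.LightningClient) error {

	_, err := client.UpdateChannelPolicy(ctx, &lnrpc.PolicyUpdateRequest{
		// Target every channel rather than a single channel point.
		Scope: &lnrpc.PolicyUpdateRequest_Global{Global: true},

		BaseFeeMsat: 1000,

		// The smallest permitted non-zero rate, i.e. minFeeRate.
		FeeRate: 0.000001,

		TimeLockDelta: 40,
	})
	return err
}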

// ForwardingHistory allows the caller to query the htlcswitch for a record of
// all HTLCs forwarded within the target time range, and an integer offset
// within that time range. If no time-range is specified, then the first chunk
// of the past 24 hrs of forwarding history is returned.
//
// A list of forwarding events is returned. The size of each forwarding event
// is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.
// In order to safely stay under this max limit, we'll return 50k events per
// response. Each response has the index offset of the last entry. The index
// offset can be provided to the request to allow the caller to skip a series
// of records.
func (r *rpcServer) ForwardingHistory(ctx context.Context,
	req *lnrpc.ForwardingHistoryRequest) (*lnrpc.ForwardingHistoryResponse, error) {

	rpcsLog.Debugf("[forwardinghistory]")

	// Before we perform the queries below, we'll instruct the switch to
	// flush any pending events to disk. This ensures we get a complete
	// snapshot at this particular time.
	if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
		return nil, fmt.Errorf("unable to flush forwarding "+
			"events: %v", err)
	}

	var (
		startTime, endTime time.Time

		numEvents uint32
	)

	// If the start time wasn't specified, we'll default to 24 hours ago.
	if req.StartTime == 0 {
		now := time.Now()
		startTime = now.Add(-time.Hour * 24)
	} else {
		startTime = time.Unix(int64(req.StartTime), 0)
	}

	// If the end time wasn't specified, assume a default end time of now.
	if req.EndTime == 0 {
		now := time.Now()
		endTime = now
	} else {
		endTime = time.Unix(int64(req.EndTime), 0)
	}

	// If the number of events wasn't specified, then we'll default to
	// returning the last 100 events.
	numEvents = req.NumMaxEvents
	if numEvents == 0 {
		numEvents = 100
	}

	// Next, we'll map the proto request into a format that is understood
	// by the forwarding log.
	eventQuery := channeldb.ForwardingEventQuery{
		StartTime:    startTime,
		EndTime:      endTime,
		IndexOffset:  req.IndexOffset,
		NumMaxEvents: numEvents,
	}
	timeSlice, err := r.server.chanDB.ForwardingLog().Query(eventQuery)
	if err != nil {
		return nil, fmt.Errorf("unable to query forwarding log: %v", err)
	}

	// TODO(roasbeef): add settlement latency?
	//  * use FPE on all records?

	// With the events retrieved, we'll now map them into the proper proto
	// response.
	//
	// TODO(roasbeef): show in ns for the outside?
	resp := &lnrpc.ForwardingHistoryResponse{
		ForwardingEvents: make([]*lnrpc.ForwardingEvent, len(timeSlice.ForwardingEvents)),
		LastOffsetIndex:  timeSlice.LastIndexOffset,
	}
	for i, event := range timeSlice.ForwardingEvents {
		amtInSat := event.AmtIn.ToSatoshis()
		amtOutSat := event.AmtOut.ToSatoshis()
		feeMsat := event.AmtIn - event.AmtOut

		resp.ForwardingEvents[i] = &lnrpc.ForwardingEvent{
			Timestamp: uint64(event.Timestamp.Unix()),
			ChanIdIn:  event.IncomingChanID.ToUint64(),
			ChanIdOut: event.OutgoingChanID.ToUint64(),
			AmtIn:     uint64(amtInSat),
			AmtOut:    uint64(amtOutSat),
			Fee:       uint64(feeMsat.ToSatoshis()),
			FeeMsat:   uint64(feeMsat),
		}
	}

	return resp, nil
}
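
// exampleForwardingHistoryPagination is an illustrative client-side sketch,
// not part of the original file: it assumes an already-connected
// lnrpc.LightningClient and shows how LastOffsetIndex lets a caller page
// through the full forwarding history.
func exampleForwardingHistoryPagination(ctx context.Context,
	client lnrpc.LightningClient) error {

	req := &lnrpc.ForwardingHistoryRequest{
		StartTime:    uint64(time.Now().Add(-24 * time.Hour).Unix()),
		NumMaxEvents: 100,
	}
	for {
		resp, err := client.ForwardingHistory(ctx, req)
		if err != nil {
			return err
		}

		// An empty chunk means the range has been exhausted.
		if len(resp.ForwardingEvents) == 0 {
			return nil
		}
		for _, event := range resp.ForwardingEvents {
			fmt.Printf("chan %v -> %v: fee %v msat\n",
				event.ChanIdIn, event.ChanIdOut, event.FeeMsat)
		}

		// Resume the next query where this response left off.
		req.IndexOffset = resp.LastOffsetIndex
	}
}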

// ExportChannelBackup attempts to return an encrypted static channel backup
// for the target channel identified by its channel point. The backup is
// encrypted with a key generated from the aezeed seed of the user. The
// returned backup can either be restored using the RestoreChannelBackup method
// once lnd is running, or via the InitWallet and UnlockWallet methods from the
// WalletUnlocker service.
func (r *rpcServer) ExportChannelBackup(ctx context.Context,
	in *lnrpc.ExportChannelBackupRequest) (*lnrpc.ChannelBackup, error) {

	// First, we'll convert the lnrpc channel point into a wire.OutPoint
	// that we can manipulate.
	txid, err := GetChanPointFundingTxid(in.ChanPoint)
	if err != nil {
		return nil, err
	}
	chanPoint := wire.OutPoint{
		Hash:  *txid,
		Index: in.ChanPoint.OutputIndex,
	}

	// Next, we'll attempt to fetch a channel backup for this channel from
	// the database. If this channel has been closed, or the outpoint is
	// unknown, then we'll return an error.
	unpackedBackup, err := chanbackup.FetchBackupForChan(
		chanPoint, r.server.chanDB,
	)
	if err != nil {
		return nil, err
	}

	// At this point, we have an unpacked backup (plaintext) so we'll now
	// attempt to serialize and encrypt it in order to create a packed
	// backup.
	packedBackups, err := chanbackup.PackStaticChanBackups(
		[]chanbackup.Single{*unpackedBackup},
		r.server.cc.keyRing,
	)
	if err != nil {
		return nil, fmt.Errorf("packing of backups failed: %v", err)
	}

	// Before we proceed, we'll ensure that we received a backup for this
	// channel, otherwise, we'll bail out.
	packedBackup, ok := packedBackups[chanPoint]
	if !ok {
		return nil, fmt.Errorf("expected single backup for "+
			"ChannelPoint(%v), got %v", chanPoint,
			len(packedBackup))
	}

	return &lnrpc.ChannelBackup{
		ChanPoint:  in.ChanPoint,
		ChanBackup: packedBackup,
	}, nil
}
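
// exampleExportChannelBackup is an illustrative client-side sketch, not
// part of the original file: it assumes an already-connected
// lnrpc.LightningClient plus the funding txid string and output index of
// the channel to back up.
func exampleExportChannelBackup(ctx context.Context,
	client lnrpc.LightningClient, fundingTxid string,
	outputIndex uint32) ([]byte, error) {

	backup, err := client.ExportChannelBackup(ctx,
		&lnrpc.ExportChannelBackupRequest{
			ChanPoint: &lnrpc.ChannelPoint{
				FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
					FundingTxidStr: fundingTxid,
				},
				OutputIndex: outputIndex,
			},
		})
	if err != nil {
		return nil, err
	}

	// The blob is encrypted with a key derived from the node's seed, so
	// it can be written to untrusted storage as-is.
	return backup.ChanBackup, nil
}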

// VerifyChanBackup allows a caller to verify the integrity of a channel backup
// snapshot. This method will accept either a packed Single or a packed Multi.
// Specifying both will result in an error.
func (r *rpcServer) VerifyChanBackup(ctx context.Context,
	in *lnrpc.ChanBackupSnapshot) (*lnrpc.VerifyChanBackupResponse, error) {

	switch {
	// If neither a Single nor a Multi has been specified, then we have
	// nothing to verify.
	case in.GetSingleChanBackups() == nil && in.GetMultiChanBackup() == nil:
		return nil, errors.New("either a Single or Multi channel " +
			"backup must be specified")

	// Either a Single or a Multi must be specified, but not both.
	case in.GetSingleChanBackups() != nil && in.GetMultiChanBackup() != nil:
		return nil, errors.New("either a Single or Multi channel " +
			"backup must be specified, but not both")

	// If a Single is specified then we'll only accept one of them to allow
	// the caller to map the valid/invalid state for each individual Single.
	case in.GetSingleChanBackups() != nil:
		chanBackupsProtos := in.GetSingleChanBackups().ChanBackups
		if len(chanBackupsProtos) != 1 {
			return nil, errors.New("only one Single is accepted " +
				"at a time")
		}

		// First, we'll convert the raw byte slice into a type we can
		// work with a bit better.
		chanBackup := chanbackup.PackedSingles(
			[][]byte{chanBackupsProtos[0].ChanBackup},
		)

		// With our PackedSingles created, we'll attempt to unpack the
		// backup. If this fails, then we know the backup is invalid
		// for some reason.
		_, err := chanBackup.Unpack(r.server.cc.keyRing)
		if err != nil {
			return nil, fmt.Errorf("invalid single channel "+
				"backup: %v", err)
		}

	case in.GetMultiChanBackup() != nil:
		// We'll convert the raw byte slice into a PackedMulti that we
		// can easily work with.
		packedMultiBackup := in.GetMultiChanBackup().MultiChanBackup
		packedMulti := chanbackup.PackedMulti(packedMultiBackup)

		// We'll now attempt to unpack the Multi. If this fails, then
		// we know it's invalid.
		_, err := packedMulti.Unpack(r.server.cc.keyRing)
		if err != nil {
			return nil, fmt.Errorf("invalid multi channel backup: "+
				"%v", err)
		}
	}

	return &lnrpc.VerifyChanBackupResponse{}, nil
}
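
// exampleVerifyMultiBackup is an illustrative client-side sketch, not part
// of the original file: it assumes an already-connected
// lnrpc.LightningClient and checks that a previously exported multi-chan
// backup blob still unpacks with the node's current seed.
func exampleVerifyMultiBackup(ctx context.Context,
	client lnrpc.LightningClient, packedMulti []byte) error {

	// Only one of the Single or Multi fields may be set per call.
	_, err := client.VerifyChanBackup(ctx, &lnrpc.ChanBackupSnapshot{
		MultiChanBackup: &lnrpc.MultiChanBackup{
			MultiChanBackup: packedMulti,
		},
	})
	return err
}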

// createBackupSnapshot converts the passed Single backups into a snapshot
// which contains individual packed single backups, as well as a single packed
// multi backup.
func (r *rpcServer) createBackupSnapshot(backups []chanbackup.Single) (
	*lnrpc.ChanBackupSnapshot, error) {

	// Once we have the set of backups, we'll attempt to pack them all
	// into a series of single channel backups.
	singleChanPackedBackups, err := chanbackup.PackStaticChanBackups(
		backups, r.server.cc.keyRing,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to pack set of chan "+
			"backups: %v", err)
	}

	// Now that we have our set of single packed backups, we'll morph that
	// into a form that the proto response requires.
	numBackups := len(singleChanPackedBackups)
	singleBackupResp := &lnrpc.ChannelBackups{
		ChanBackups: make([]*lnrpc.ChannelBackup, 0, numBackups),
	}
	for chanPoint, singlePackedBackup := range singleChanPackedBackups {
		txid := chanPoint.Hash
		rpcChanPoint := &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
				FundingTxidBytes: txid[:],
			},
			OutputIndex: chanPoint.Index,
		}

		singleBackupResp.ChanBackups = append(
			singleBackupResp.ChanBackups,
			&lnrpc.ChannelBackup{
				ChanPoint:  rpcChanPoint,
				ChanBackup: singlePackedBackup,
			},
		)
	}

	// In addition to the set of single chan backups, we'll also create a
	// single multi-channel backup which can be serialized into a single
	// file for safe storage.
	var b bytes.Buffer
	unpackedMultiBackup := chanbackup.Multi{
		StaticBackups: backups,
	}
	err = unpackedMultiBackup.PackToWriter(&b, r.server.cc.keyRing)
	if err != nil {
		return nil, fmt.Errorf("unable to multi-pack backups: %v", err)
	}

	multiBackupResp := &lnrpc.MultiChanBackup{
		MultiChanBackup: b.Bytes(),
	}
	for _, singleBackup := range singleBackupResp.ChanBackups {
		multiBackupResp.ChanPoints = append(
			multiBackupResp.ChanPoints, singleBackup.ChanPoint,
		)
	}

	return &lnrpc.ChanBackupSnapshot{
		SingleChanBackups: singleBackupResp,
		MultiChanBackup:   multiBackupResp,
	}, nil
}

// ExportAllChannelBackups returns static channel backups for all existing
// channels known to lnd. A set of regular singular static channel backups for
// each channel is returned. Additionally, a multi-channel backup is returned
// as well, which contains a single encrypted blob containing the backups of
// each channel.
func (r *rpcServer) ExportAllChannelBackups(ctx context.Context,
	in *lnrpc.ChanBackupExportRequest) (*lnrpc.ChanBackupSnapshot, error) {

	// First, we'll attempt to read backups for ALL currently opened
	// channels from disk.
	allUnpackedBackups, err := chanbackup.FetchStaticChanBackups(
		r.server.chanDB,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch all static chan "+
			"backups: %v", err)
	}

	// With the backups assembled, we'll create a full snapshot.
	return r.createBackupSnapshot(allUnpackedBackups)
}

// RestoreChannelBackups accepts a set of singular channel backups, or a single
// encrypted multi-chan backup and attempts to recover any funds remaining
// within the channel. If we're able to unpack the backup, then the new channel
// will be shown under listchannels, as well as pending channels.
func (r *rpcServer) RestoreChannelBackups(ctx context.Context,
	in *lnrpc.RestoreChanBackupRequest) (*lnrpc.RestoreBackupResponse, error) {

	// First, we'll make our implementation of the
	// chanbackup.ChannelRestorer interface which we'll use to properly
	// restore either a set of chanbackup.Single or chanbackup.Multi
	// backups.
	chanRestorer := &chanDBRestorer{
		db:         r.server.chanDB,
		secretKeys: r.server.cc.keyRing,
		chainArb:   r.server.chainArb,
	}

	// We'll accept either a list of Single backups, or a single Multi
	// backup which contains several single backups.
	switch {
	case in.GetChanBackups() != nil:
		chanBackupsProtos := in.GetChanBackups()

		// Now that we know what type of backup we're working with,
		// we'll parse them all out into a more suitable format.
		packedBackups := make([][]byte, 0, len(chanBackupsProtos.ChanBackups))
		for _, chanBackup := range chanBackupsProtos.ChanBackups {
			packedBackups = append(
				packedBackups, chanBackup.ChanBackup,
			)
		}

		// With our backups obtained, we'll now restore them which will
		// write the new backups to disk, and then attempt to connect
		// out to any peers that we know of which were our prior
		// channel peers.
		err := chanbackup.UnpackAndRecoverSingles(
			chanbackup.PackedSingles(packedBackups),
			r.server.cc.keyRing, chanRestorer, r.server,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to unpack single "+
				"backups: %v", err)
		}

	case in.GetMultiChanBackup() != nil:
		packedMultiBackup := in.GetMultiChanBackup()

		// With our backups obtained, we'll now restore them which will
		// write the new backups to disk, and then attempt to connect
		// out to any peers that we know of which were our prior
		// channel peers.
		packedMulti := chanbackup.PackedMulti(packedMultiBackup)
		err := chanbackup.UnpackAndRecoverMulti(
			packedMulti, r.server.cc.keyRing, chanRestorer,
			r.server,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to unpack chan "+
				"backup: %v", err)
		}
	}

	return &lnrpc.RestoreBackupResponse{}, nil
}

// SubscribeChannelBackups allows a client to subscribe to the most up to
// date information concerning the state of all channel backups. Each time a
// new channel is added, we return the new set of channels, along with a
// multi-chan backup containing the backup info for all channels. Each time a
// channel is closed, we send a new update, which contains no new chan
// backups, but the updated set of encrypted multi-chan backups with the
// closed channel(s) removed.
func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription,
	updateStream lnrpc.Lightning_SubscribeChannelBackupsServer) error {

	// First, we'll subscribe to the primary channel notifier so we can
	// obtain events for new opened/closed channels.
	chanSubscription, err := r.server.channelNotifier.SubscribeChannelEvents()
	if err != nil {
		return err
	}

	defer chanSubscription.Cancel()
	for {
		select {
		// A new event has been sent by the channel notifier, we'll
		// assemble, then sling out a new event to the client.
		case e := <-chanSubscription.Updates():
			// TODO(roasbeef): batch dispatch ntnfs

			switch e.(type) {

			// We only care about new/closed channels, so we'll
			// skip any events for active/inactive channels.
			case channelnotifier.ActiveChannelEvent:
				continue
			case channelnotifier.InactiveChannelEvent:
				continue
			}

			// Now that we know the channel state has changed,
			// we'll obtain the current set of single channel
			// backups from disk.
			chanBackups, err := chanbackup.FetchStaticChanBackups(
				r.server.chanDB,
			)
			if err != nil {
				return fmt.Errorf("unable to fetch all "+
					"static chan backups: %v", err)
			}

			// With our backups obtained, we'll pack them into a
			// snapshot and send them back to the client.
			backupSnapshot, err := r.createBackupSnapshot(
				chanBackups,
			)
			if err != nil {
				return err
			}
			err = updateStream.Send(backupSnapshot)
			if err != nil {
				return err
			}

		case <-r.quit:
			return nil
		}
	}
}
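
// exampleBackupSubscription is an illustrative client-side sketch, not part
// of the original file: it assumes an already-connected
// lnrpc.LightningClient and a caller-supplied persist function, and stores
// the fresh multi-chan backup each time the channel set changes.
func exampleBackupSubscription(ctx context.Context,
	client lnrpc.LightningClient, persist func([]byte) error) error {

	stream, err := client.SubscribeChannelBackups(
		ctx, &lnrpc.ChannelBackupSubscription{},
	)
	if err != nil {
		return err
	}
	for {
		snapshot, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		// Persist the single encrypted blob covering all channels.
		err = persist(snapshot.MultiChanBackup.MultiChanBackup)
		if err != nil {
			return err
		}
	}
}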