2015-12-26 09:09:17 +03:00
|
|
|
package main
|
2015-12-30 02:09:38 +03:00
|
|
|
|
|
|
|
import (
|
2018-03-28 07:51:04 +03:00
|
|
|
"bytes"
|
2016-11-11 04:37:21 +03:00
|
|
|
"crypto/rand"
|
2017-03-16 04:56:25 +03:00
|
|
|
"crypto/sha256"
|
2016-06-21 21:52:09 +03:00
|
|
|
"encoding/hex"
|
2017-01-25 04:12:51 +03:00
|
|
|
"errors"
|
2015-12-31 08:40:41 +03:00
|
|
|
"fmt"
|
2016-07-13 03:46:25 +03:00
|
|
|
"io"
|
2016-12-27 08:51:47 +03:00
|
|
|
"math"
|
2018-06-15 03:14:31 +03:00
|
|
|
"sort"
|
2017-01-15 05:12:20 +03:00
|
|
|
"strings"
|
2018-07-01 01:13:14 +03:00
|
|
|
"sync"
|
2016-01-17 06:12:36 +03:00
|
|
|
"sync/atomic"
|
2018-07-28 04:39:38 +03:00
|
|
|
"time"
|
2015-12-30 02:09:38 +03:00
|
|
|
|
2018-07-31 10:17:17 +03:00
|
|
|
"github.com/btcsuite/btcd/blockchain"
|
|
|
|
"github.com/btcsuite/btcd/btcec"
|
|
|
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
|
|
|
"github.com/btcsuite/btcd/txscript"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/btcsuite/btcutil"
|
|
|
|
"github.com/btcsuite/btcwallet/waddrmgr"
|
2018-03-11 06:00:57 +03:00
|
|
|
"github.com/coreos/bbolt"
|
2016-09-21 03:15:26 +03:00
|
|
|
"github.com/davecgh/go-spew/spew"
|
2016-09-19 22:04:56 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2017-05-02 23:04:58 +03:00
|
|
|
"github.com/lightningnetwork/lnd/htlcswitch"
|
2016-01-16 21:38:48 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc"
|
2016-08-13 01:53:18 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet"
|
2016-07-13 03:46:25 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
2016-12-27 08:50:19 +03:00
|
|
|
"github.com/lightningnetwork/lnd/routing"
|
2018-06-15 06:19:45 +03:00
|
|
|
"github.com/lightningnetwork/lnd/signal"
|
2017-01-03 02:36:15 +03:00
|
|
|
"github.com/lightningnetwork/lnd/zpay32"
|
2017-04-20 05:28:10 +03:00
|
|
|
"github.com/tv42/zbase32"
|
2015-12-30 02:09:38 +03:00
|
|
|
"golang.org/x/net/context"
|
2018-07-28 04:39:38 +03:00
|
|
|
"gopkg.in/macaroon-bakery.v2/bakery"
|
2015-12-30 02:09:38 +03:00
|
|
|
)
|
|
|
|
|
2018-05-23 22:05:04 +03:00
|
|
|
const (
	// maxBtcPaymentMSat is the maximum allowed Bitcoin payment currently
	// permitted as defined in BOLT-0002.
	maxBtcPaymentMSat = lnwire.MilliSatoshi(math.MaxUint32)

	// maxLtcPaymentMSat is the maximum allowed Litecoin payment currently
	// permitted. It is the Bitcoin limit scaled by the BTC->LTC
	// conversion rate so the cap is of equivalent value on the Litecoin
	// chain.
	maxLtcPaymentMSat = lnwire.MilliSatoshi(math.MaxUint32) *
		btcToLtcConversionRate
)
|
|
|
|
|
2015-12-30 02:09:38 +03:00
|
|
|
var (
	// zeroHash is an array of 32 zero bytes, the zero value for a
	// 32-byte hash.
	zeroHash [32]byte

	// maxPaymentMSat is the maximum allowed payment currently permitted as
	// defined in BOLT-002. This value depends on which chain is active.
	// It is set to the value under the Bitcoin chain as default.
	maxPaymentMSat = maxBtcPaymentMSat

	// defaultAccount aliases waddrmgr.DefaultAccountNum, the wallet's
	// default account number used for key/address derivation.
	defaultAccount uint32 = waddrmgr.DefaultAccountNum

	// readPermissions is a slice of all entities that allow read
	// permissions for authorization purposes, all lowercase.
	readPermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "read",
		},
		{
			Entity: "offchain",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "message",
			Action: "read",
		},
		{
			Entity: "peers",
			Action: "read",
		},
		{
			Entity: "info",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "read",
		},
	}

	// writePermissions is a slice of all entities that allow write
	// permissions for authorization purposes, all lowercase.
	writePermissions = []bakery.Op{
		{
			Entity: "onchain",
			Action: "write",
		},
		{
			Entity: "offchain",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "write",
		},
		{
			Entity: "message",
			Action: "write",
		},
		{
			Entity: "peers",
			Action: "write",
		},
		{
			Entity: "info",
			Action: "write",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
	}

	// invoicePermissions is a slice of all the entities that allows a user
	// to only access calls that are related to invoices, so: streaming
	// RPCs, generating, and listening invoices.
	invoicePermissions = []bakery.Op{
		{
			Entity: "invoices",
			Action: "read",
		},
		{
			Entity: "invoices",
			Action: "write",
		},
		{
			Entity: "address",
			Action: "read",
		},
		{
			Entity: "address",
			Action: "write",
		},
	}

	// permissions maps RPC calls to the permissions they require. Each
	// gRPC method name is mapped to one or more entity/action pairs that
	// a caller's macaroon must satisfy.
	permissions = map[string][]bakery.Op{
		"/lnrpc.Lightning/SendCoins": {{
			Entity: "onchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendMany": {{
			Entity: "onchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/NewAddress": {{
			Entity: "address",
			Action: "write",
		}},
		"/lnrpc.Lightning/NewWitnessAddress": {{
			Entity: "address",
			Action: "write",
		}},
		"/lnrpc.Lightning/SignMessage": {{
			Entity: "message",
			Action: "write",
		}},
		"/lnrpc.Lightning/VerifyMessage": {{
			Entity: "message",
			Action: "read",
		}},
		"/lnrpc.Lightning/ConnectPeer": {{
			Entity: "peers",
			Action: "write",
		}},
		"/lnrpc.Lightning/DisconnectPeer": {{
			Entity: "peers",
			Action: "write",
		}},
		// Channel opening/closing touches both the on-chain wallet
		// (funding/closing transactions) and off-chain state.
		"/lnrpc.Lightning/OpenChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/OpenChannelSync": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/CloseChannel": {{
			Entity: "onchain",
			Action: "write",
		}, {
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/GetInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListPeers": {{
			Entity: "peers",
			Action: "read",
		}},
		"/lnrpc.Lightning/WalletBalance": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ChannelBalance": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/PendingChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/ClosedChannels": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/SendPayment": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendPaymentSync": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendToRoute": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/SendToRouteSync": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/AddInvoice": {{
			Entity: "invoices",
			Action: "write",
		}},
		"/lnrpc.Lightning/LookupInvoice": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListInvoices": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeInvoices": {{
			Entity: "invoices",
			Action: "read",
		}},
		"/lnrpc.Lightning/SubscribeTransactions": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetTransactions": {{
			Entity: "onchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/DescribeGraph": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetChanInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNodeInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/QueryRoutes": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/GetNetworkInfo": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/StopDaemon": {{
			Entity: "info",
			Action: "write",
		}},
		"/lnrpc.Lightning/SubscribeChannelGraph": {{
			Entity: "info",
			Action: "read",
		}},
		"/lnrpc.Lightning/ListPayments": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/DeleteAllPayments": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/DebugLevel": {{
			Entity: "info",
			Action: "write",
		}},
		"/lnrpc.Lightning/DecodePayReq": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/FeeReport": {{
			Entity: "offchain",
			Action: "read",
		}},
		"/lnrpc.Lightning/UpdateChannelPolicy": {{
			Entity: "offchain",
			Action: "write",
		}},
		"/lnrpc.Lightning/ForwardingHistory": {{
			Entity: "offchain",
			Action: "read",
		}},
	}
)
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// rpcServer is a gRPC, RPC front end to the lnd daemon.
// TODO(roasbeef): pagination support for the list-style calls
type rpcServer struct {
	// started is set atomically by Start so the startup logic only runs
	// once.
	started int32 // To be used atomically.

	// shutdown is set atomically by Stop so shutdown only runs once.
	shutdown int32 // To be used atomically.

	// server is the main lnd server instance this RPC front end is
	// attached to.
	server *server

	// wg tracks any helper goroutines launched by the rpcServer so they
	// can be waited on during shutdown.
	wg sync.WaitGroup

	// quit is closed in Stop to signal any active goroutines to exit.
	quit chan struct{}
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// A compile time check to ensure that rpcServer fully implements the
// LightningServer gRPC service. Fails the build if any method of the
// interface is missing.
var _ lnrpc.LightningServer = (*rpcServer)(nil)
|
|
|
|
|
2017-02-23 01:49:04 +03:00
|
|
|
// newRPCServer creates and returns a new instance of the rpcServer.
|
2018-01-16 19:18:41 +03:00
|
|
|
func newRPCServer(s *server) *rpcServer {
|
2017-08-18 04:50:57 +03:00
|
|
|
return &rpcServer{
|
2018-01-16 19:18:41 +03:00
|
|
|
server: s,
|
|
|
|
quit: make(chan struct{}, 1),
|
2017-08-18 04:50:57 +03:00
|
|
|
}
|
2015-12-30 02:09:38 +03:00
|
|
|
}
|
|
|
|
|
2017-08-22 09:25:41 +03:00
|
|
|
// Start launches any helper goroutines required for the rpcServer to function.
|
2016-01-17 06:12:36 +03:00
|
|
|
func (r *rpcServer) Start() error {
|
|
|
|
if atomic.AddInt32(&r.started, 1) != 1 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-01-14 08:41:46 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// Stop signals any active goroutines for a graceful closure.
|
2016-01-17 06:12:36 +03:00
|
|
|
func (r *rpcServer) Stop() error {
|
|
|
|
if atomic.AddInt32(&r.shutdown, 1) != 1 {
|
|
|
|
return nil
|
2016-01-02 07:27:40 +03:00
|
|
|
}
|
2016-01-17 06:12:36 +03:00
|
|
|
|
2016-04-25 06:26:32 +03:00
|
|
|
close(r.quit)
|
|
|
|
|
2016-01-17 06:12:36 +03:00
|
|
|
return nil
|
2016-01-02 07:27:40 +03:00
|
|
|
}
|
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
// addrPairsToOutputs converts a map describing a set of outputs to be created,
|
|
|
|
// the outputs themselves. The passed map pairs up an address, to a desired
|
|
|
|
// output value amount. Each address is converted to its corresponding pkScript
|
|
|
|
// to be used within the constructed output(s).
|
|
|
|
func addrPairsToOutputs(addrPairs map[string]int64) ([]*wire.TxOut, error) {
|
|
|
|
outputs := make([]*wire.TxOut, 0, len(addrPairs))
|
|
|
|
for addr, amt := range addrPairs {
|
2016-07-14 04:37:50 +03:00
|
|
|
addr, err := btcutil.DecodeAddress(addr, activeNetParams.Params)
|
2016-03-23 04:48:46 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
pkscript, err := txscript.PayToAddrScript(addr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
outputs = append(outputs, wire.NewTxOut(amt, pkscript))
|
2015-12-30 02:09:38 +03:00
|
|
|
}
|
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
return outputs, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// sendCoinsOnChain makes an on-chain transaction in or to send coins to one or
|
|
|
|
// more addresses specified in the passed payment map. The payment map maps an
|
|
|
|
// address to a specified output value to be sent to that address.
|
2017-11-23 09:57:23 +03:00
|
|
|
func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
|
2018-07-28 04:39:38 +03:00
|
|
|
feeRate lnwallet.SatPerKWeight) (*chainhash.Hash, error) {
|
2017-11-23 09:57:23 +03:00
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
outputs, err := addrPairsToOutputs(paymentMap)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-02-13 17:13:01 +03:00
|
|
|
return r.server.cc.wallet.SendOutputs(outputs, feeRate)
|
2017-11-23 09:57:23 +03:00
|
|
|
}
|
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
// determineFeePerKw will determine the fee in sat/kw that should be paid given
|
|
|
|
// an estimator, a confirmation target, and a manual value for sat/byte. A value
|
|
|
|
// is chosen based on the two free parameters as one, or both of them can be
|
|
|
|
// zero.
|
|
|
|
func determineFeePerKw(feeEstimator lnwallet.FeeEstimator, targetConf int32,
|
|
|
|
feePerByte int64) (lnwallet.SatPerKWeight, error) {
|
2017-11-23 09:57:23 +03:00
|
|
|
|
|
|
|
switch {
|
|
|
|
// If the target number of confirmations is set, then we'll use that to
|
2018-02-07 06:11:11 +03:00
|
|
|
// consult our fee estimator for an adequate fee.
|
2017-11-23 09:57:23 +03:00
|
|
|
case targetConf != 0:
|
2018-07-28 04:39:38 +03:00
|
|
|
feePerKw, err := feeEstimator.EstimateFeePerKW(
|
2017-11-23 09:57:23 +03:00
|
|
|
uint32(targetConf),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return 0, fmt.Errorf("unable to query fee "+
|
|
|
|
"estimator: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
return feePerKw, nil
|
2017-11-23 09:57:23 +03:00
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// If a manual sat/byte fee rate is set, then we'll use that directly.
|
2018-07-28 04:39:38 +03:00
|
|
|
// We'll need to convert it to sat/kw as this is what we use internally.
|
2018-02-13 17:13:01 +03:00
|
|
|
case feePerByte != 0:
|
2018-08-13 22:57:06 +03:00
|
|
|
feePerKW := lnwallet.SatPerKVByte(feePerByte * 1000).FeePerKWeight()
|
|
|
|
if feePerKW < lnwallet.FeePerKwFloor {
|
|
|
|
rpcsLog.Infof("Manual fee rate input of %d sat/kw is "+
|
|
|
|
"too low, using %d sat/kw instead", feePerKW,
|
|
|
|
lnwallet.FeePerKwFloor)
|
|
|
|
feePerKW = lnwallet.FeePerKwFloor
|
|
|
|
}
|
|
|
|
return feePerKW, nil
|
2017-11-23 09:57:23 +03:00
|
|
|
|
|
|
|
// Otherwise, we'll attempt a relaxed confirmation target for the
|
|
|
|
// transaction
|
|
|
|
default:
|
2018-07-28 04:39:38 +03:00
|
|
|
feePerKw, err := feeEstimator.EstimateFeePerKW(6)
|
2017-11-23 09:57:23 +03:00
|
|
|
if err != nil {
|
2018-07-28 04:39:38 +03:00
|
|
|
return 0, fmt.Errorf("unable to query fee estimator: "+
|
|
|
|
"%v", err)
|
2017-11-23 09:57:23 +03:00
|
|
|
}
|
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
return feePerKw, nil
|
2017-11-23 09:57:23 +03:00
|
|
|
}
|
2016-06-29 21:31:29 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// SendCoins executes a request to send coins to a particular address. Unlike
|
|
|
|
// SendMany, this RPC call only allows creating a single output at a time.
|
|
|
|
func (r *rpcServer) SendCoins(ctx context.Context,
|
|
|
|
in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// Based on the passed fee related parameters, we'll determine an
|
|
|
|
// appropriate fee rate for this transaction.
|
2018-07-28 04:39:38 +03:00
|
|
|
feePerKw, err := determineFeePerKw(
|
2017-11-23 09:57:23 +03:00
|
|
|
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
rpcsLog.Infof("[sendcoins] addr=%v, amt=%v, sat/kw=%v", in.Addr,
|
|
|
|
btcutil.Amount(in.Amount), int64(feePerKw))
|
2016-06-29 21:31:29 +03:00
|
|
|
|
|
|
|
paymentMap := map[string]int64{in.Addr: in.Amount}
|
2018-07-28 04:39:38 +03:00
|
|
|
txid, err := r.sendCoinsOnChain(paymentMap, feePerKw)
|
2016-06-29 21:31:29 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
rpcsLog.Infof("[sendcoins] spend generated txid: %v", txid.String())
|
|
|
|
|
|
|
|
return &lnrpc.SendCoinsResponse{Txid: txid.String()}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// SendMany handles a request for a transaction create multiple specified
|
|
|
|
// outputs in parallel.
|
|
|
|
func (r *rpcServer) SendMany(ctx context.Context,
|
|
|
|
in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// Based on the passed fee related parameters, we'll determine an
|
2018-07-28 04:39:38 +03:00
|
|
|
// appropriate fee rate for this transaction.
|
|
|
|
feePerKw, err := determineFeePerKw(
|
2017-11-23 09:57:23 +03:00
|
|
|
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
rpcsLog.Infof("[sendmany] outputs=%v, sat/kw=%v",
|
|
|
|
spew.Sdump(in.AddrToAmount), int64(feePerKw))
|
2017-11-23 09:57:23 +03:00
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
txid, err := r.sendCoinsOnChain(in.AddrToAmount, feePerKw)
|
2015-12-30 02:09:38 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
rpcsLog.Infof("[sendmany] spend generated txid: %v", txid.String())
|
2016-04-25 06:26:32 +03:00
|
|
|
|
|
|
|
return &lnrpc.SendManyResponse{Txid: txid.String()}, nil
|
2015-12-30 02:09:38 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// NewAddress creates a new address under control of the local wallet.
|
2016-04-25 06:26:32 +03:00
|
|
|
func (r *rpcServer) NewAddress(ctx context.Context,
|
|
|
|
in *lnrpc.NewAddressRequest) (*lnrpc.NewAddressResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2016-04-25 06:26:32 +03:00
|
|
|
// Translate the gRPC proto address type to the wallet controller's
|
|
|
|
// available address types.
|
2016-08-13 01:53:18 +03:00
|
|
|
var addrType lnwallet.AddressType
|
2016-04-25 06:26:32 +03:00
|
|
|
switch in.Type {
|
|
|
|
case lnrpc.NewAddressRequest_WITNESS_PUBKEY_HASH:
|
2016-08-13 01:53:18 +03:00
|
|
|
addrType = lnwallet.WitnessPubKey
|
2016-04-25 06:26:32 +03:00
|
|
|
case lnrpc.NewAddressRequest_NESTED_PUBKEY_HASH:
|
2016-08-13 01:53:18 +03:00
|
|
|
addrType = lnwallet.NestedWitnessPubKey
|
2016-04-25 06:26:32 +03:00
|
|
|
}
|
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
addr, err := r.server.cc.wallet.NewAddress(addrType, false)
|
2015-12-30 02:09:38 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
rpcsLog.Infof("[newaddress] addr=%v", addr.String())
|
2015-12-30 02:09:38 +03:00
|
|
|
return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
|
|
|
|
}
|
2015-12-31 06:56:57 +03:00
|
|
|
|
2016-10-16 00:41:11 +03:00
|
|
|
// NewWitnessAddress returns a new native witness address under the control of
|
|
|
|
// the local wallet.
|
|
|
|
func (r *rpcServer) NewWitnessAddress(ctx context.Context,
|
|
|
|
in *lnrpc.NewWitnessAddressRequest) (*lnrpc.NewAddressResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-08-22 10:23:24 +03:00
|
|
|
addr, err := r.server.cc.wallet.NewAddress(
|
|
|
|
lnwallet.NestedWitnessPubKey, false,
|
|
|
|
)
|
2016-10-16 00:41:11 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
rpcsLog.Infof("[newaddress] addr=%v", addr.String())
|
|
|
|
return &lnrpc.NewAddressResponse{Address: addr.String()}, nil
|
|
|
|
}
|
|
|
|
|
2018-04-26 05:45:26 +03:00
|
|
|
var (
	// signedMsgPrefix is a special prefix that we'll prepend to any
	// messages we sign/verify. We do this to ensure that we don't
	// accidentally sign a sighash, or other sensitive material. By
	// prepending this fragment, we bind message signing to our particular
	// context.
	signedMsgPrefix = []byte("Lightning Signed Message:")
)
|
|
|
|
|
2017-04-20 05:28:10 +03:00
|
|
|
// SignMessage signs a message with the resident node's private key. The
|
2018-04-26 05:45:26 +03:00
|
|
|
// returned signature string is zbase32 encoded and pubkey recoverable, meaning
|
|
|
|
// that only the message digest and signature are needed for verification.
|
2017-04-20 05:28:10 +03:00
|
|
|
func (r *rpcServer) SignMessage(ctx context.Context,
|
|
|
|
in *lnrpc.SignMessageRequest) (*lnrpc.SignMessageResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-04-20 05:28:10 +03:00
|
|
|
if in.Msg == nil {
|
|
|
|
return nil, fmt.Errorf("need a message to sign")
|
|
|
|
}
|
|
|
|
|
2018-04-26 05:45:26 +03:00
|
|
|
in.Msg = append(signedMsgPrefix, in.Msg...)
|
2017-04-29 14:44:29 +03:00
|
|
|
sigBytes, err := r.server.nodeSigner.SignCompact(in.Msg)
|
2017-04-20 05:28:10 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
sig := zbase32.EncodeToString(sigBytes)
|
|
|
|
return &lnrpc.SignMessageResponse{Signature: sig}, nil
|
|
|
|
}
|
|
|
|
|
2018-04-26 05:45:26 +03:00
|
|
|
// VerifyMessage verifies a signature over a msg. The signature must be zbase32
|
|
|
|
// encoded and signed by an active node in the resident node's channel
|
|
|
|
// database. In addition to returning the validity of the signature,
|
2017-04-29 14:44:29 +03:00
|
|
|
// VerifyMessage also returns the recovered pubkey from the signature.
|
2017-04-20 05:28:10 +03:00
|
|
|
func (r *rpcServer) VerifyMessage(ctx context.Context,
|
|
|
|
in *lnrpc.VerifyMessageRequest) (*lnrpc.VerifyMessageResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-04-20 05:28:10 +03:00
|
|
|
if in.Msg == nil {
|
|
|
|
return nil, fmt.Errorf("need a message to verify")
|
|
|
|
}
|
|
|
|
|
|
|
|
// The signature should be zbase32 encoded
|
|
|
|
sig, err := zbase32.DecodeString(in.Signature)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to decode signature: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-04-29 14:44:29 +03:00
|
|
|
// The signature is over the double-sha256 hash of the message.
|
2018-04-26 05:45:26 +03:00
|
|
|
in.Msg = append(signedMsgPrefix, in.Msg...)
|
2017-04-20 05:28:10 +03:00
|
|
|
digest := chainhash.DoubleHashB(in.Msg)
|
|
|
|
|
|
|
|
// RecoverCompact both recovers the pubkey and validates the signature.
|
|
|
|
pubKey, _, err := btcec.RecoverCompact(btcec.S256(), sig, digest)
|
|
|
|
if err != nil {
|
|
|
|
return &lnrpc.VerifyMessageResponse{Valid: false}, nil
|
|
|
|
}
|
2017-04-29 14:44:29 +03:00
|
|
|
pubKeyHex := hex.EncodeToString(pubKey.SerializeCompressed())
|
2017-04-20 05:28:10 +03:00
|
|
|
|
2018-01-31 07:30:00 +03:00
|
|
|
var pub [33]byte
|
|
|
|
copy(pub[:], pubKey.SerializeCompressed())
|
|
|
|
|
2017-04-29 14:44:29 +03:00
|
|
|
// Query the channel graph to ensure a node in the network with active
|
|
|
|
// channels signed the message.
|
2018-04-26 05:45:26 +03:00
|
|
|
//
|
2017-04-20 05:28:10 +03:00
|
|
|
// TODO(phlip9): Require valid nodes to have capital in active channels.
|
2017-04-29 14:44:29 +03:00
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
2018-01-31 07:30:00 +03:00
|
|
|
_, active, err := graph.HasLightningNode(pub)
|
2017-04-20 05:28:10 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("failed to query graph: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-08-03 06:59:43 +03:00
|
|
|
return &lnrpc.VerifyMessageResponse{
|
|
|
|
Valid: active,
|
|
|
|
Pubkey: pubKeyHex,
|
|
|
|
}, nil
|
2017-04-20 05:28:10 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// ConnectPeer attempts to establish a connection to a remote peer.
|
2016-01-17 06:12:36 +03:00
|
|
|
func (r *rpcServer) ConnectPeer(ctx context.Context,
|
|
|
|
in *lnrpc.ConnectPeerRequest) (*lnrpc.ConnectPeerResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-08-03 06:59:43 +03:00
|
|
|
// The server hasn't yet started, so it won't be able to service any of
|
|
|
|
// our requests, so we'll bail early here.
|
|
|
|
if !r.server.Started() {
|
|
|
|
return nil, fmt.Errorf("chain backend is still syncing, server " +
|
|
|
|
"not active yet")
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
if in.Addr == nil {
|
2016-01-02 07:27:40 +03:00
|
|
|
return nil, fmt.Errorf("need: lnc pubkeyhash@hostname")
|
|
|
|
}
|
|
|
|
|
2016-10-28 05:49:10 +03:00
|
|
|
pubkeyHex, err := hex.DecodeString(in.Addr.Pubkey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-04-21 23:32:17 +03:00
|
|
|
pubKey, err := btcec.ParsePubKey(pubkeyHex, btcec.S256())
|
2016-10-28 05:49:10 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-06-21 21:52:09 +03:00
|
|
|
|
2017-04-21 23:32:17 +03:00
|
|
|
// Connections to ourselves are disallowed for obvious reasons.
|
|
|
|
if pubKey.IsEqual(r.server.identityPriv.PubKey()) {
|
|
|
|
return nil, fmt.Errorf("cannot make connection to self")
|
|
|
|
}
|
|
|
|
|
2018-04-27 23:59:59 +03:00
|
|
|
addr, err := parseAddr(in.Addr.Host)
|
2016-06-21 21:52:09 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-10-28 05:49:10 +03:00
|
|
|
peerAddr := &lnwire.NetAddress{
|
2017-04-21 23:32:17 +03:00
|
|
|
IdentityKey: pubKey,
|
2018-04-27 23:59:59 +03:00
|
|
|
Address: addr,
|
2016-10-28 05:49:10 +03:00
|
|
|
ChainNet: activeNetParams.Net,
|
|
|
|
}
|
|
|
|
|
2017-01-10 06:08:52 +03:00
|
|
|
if err := r.server.ConnectToPeer(peerAddr, in.Perm); err != nil {
|
2016-06-21 21:52:09 +03:00
|
|
|
rpcsLog.Errorf("(connectpeer): error connecting to peer: %v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
rpcsLog.Debugf("Connected to peer: %v", peerAddr.String())
|
2017-01-10 06:08:52 +03:00
|
|
|
return &lnrpc.ConnectPeerResponse{}, nil
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// DisconnectPeer attempts to disconnect one peer from another identified by a
|
2017-12-30 17:44:31 +03:00
|
|
|
// given pubKey. In the case that we currently have a pending or active channel
|
2017-07-28 02:39:49 +03:00
|
|
|
// with the target peer, this action will be disallowed.
|
2017-05-06 02:02:03 +03:00
|
|
|
func (r *rpcServer) DisconnectPeer(ctx context.Context,
|
|
|
|
in *lnrpc.DisconnectPeerRequest) (*lnrpc.DisconnectPeerResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey)
|
|
|
|
|
2017-08-03 06:59:43 +03:00
|
|
|
if !r.server.Started() {
|
|
|
|
return nil, fmt.Errorf("chain backend is still syncing, server " +
|
|
|
|
"not active yet")
|
|
|
|
}
|
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// First we'll validate the string passed in within the request to
|
|
|
|
// ensure that it's a valid hex-string, and also a valid compressed
|
|
|
|
// public key.
|
|
|
|
pubKeyBytes, err := hex.DecodeString(in.PubKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to decode pubkey bytes: %v", err)
|
|
|
|
}
|
|
|
|
peerPubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to parse pubkey: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll fetch the pending/active channels we have with a
|
|
|
|
// particular peer.
|
|
|
|
nodeChannels, err := r.server.chanDB.FetchOpenChannels(peerPubKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to fetch channels for peer: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// In order to avoid erroneously disconnecting from a peer that we have
|
|
|
|
// an active channel with, if we have any channels active with this
|
|
|
|
// peer, then we'll disallow disconnecting from them.
|
2018-02-23 01:28:48 +03:00
|
|
|
if len(nodeChannels) > 0 && !cfg.UnsafeDisconnect {
|
2017-05-06 02:02:03 +03:00
|
|
|
return nil, fmt.Errorf("cannot disconnect from peer(%x), "+
|
|
|
|
"all active channels with the peer need to be closed "+
|
|
|
|
"first", pubKeyBytes)
|
|
|
|
}
|
|
|
|
|
|
|
|
// With all initial validation complete, we'll now request that the
|
2017-12-30 17:44:31 +03:00
|
|
|
// server disconnects from the peer.
|
2017-05-06 02:02:03 +03:00
|
|
|
if err := r.server.DisconnectPeer(peerPubKey); err != nil {
|
2017-05-02 22:31:35 +03:00
|
|
|
return nil, fmt.Errorf("unable to disconnect peer: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &lnrpc.DisconnectPeerResponse{}, nil
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// OpenChannel attempts to open a singly funded channel specified in the
// request to a remote peer. Updates on the state of the pending channel are
// streamed back to the caller until a final ChanOpen update is delivered.
func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
	updateStream lnrpc.Lightning_OpenChannelServer) error {

	rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+
		"allocation(us=%v, them=%v)", in.NodePubkeyString,
		in.LocalFundingAmount, in.PushSat)

	// Channels can't be opened until the chain backend has finished
	// syncing, as indicated by the error returned below.
	if !r.server.Started() {
		return fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// Unpack the request parameters into their native types.
	localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
	remoteInitialBalance := btcutil.Amount(in.PushSat)
	minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat)
	remoteCsvDelay := uint16(in.RemoteCsvDelay)

	// Ensure that the initial balance of the remote party (if pushing
	// satoshis) does not exceed the amount the local party has requested
	// for funding.
	//
	// TODO(roasbeef): incorporate base fee?
	if remoteInitialBalance >= localFundingAmt {
		return fmt.Errorf("amount pushed to remote peer for initial " +
			"state must be below the local funding amount")
	}

	// Ensure that the user doesn't exceed the current soft-limit for
	// channel size. If the funding amount is above the soft-limit, then
	// we'll reject the request.
	if localFundingAmt > maxFundingAmount {
		return fmt.Errorf("funding amount is too large, the max "+
			"channel size is: %v", maxFundingAmount)
	}

	// Restrict the size of the channel we'll actually open. At a later
	// level, we'll ensure that the output we create after accounting for
	// fees that a dust output isn't created.
	if localFundingAmt < minChanFundingSize {
		return fmt.Errorf("channel is too small, the minimum channel "+
			"size is: %v SAT", int64(minChanFundingSize))
	}

	// Ensure that the MinConfs parameter is non-negative.
	if in.MinConfs < 0 {
		return errors.New("minimum number of confirmations must be a " +
			"non-negative number")
	}

	var (
		nodePubKey      *btcec.PublicKey
		nodePubKeyBytes []byte
		err             error
	)

	// TODO(roasbeef): also return channel ID?

	// Ensure that the NodePubKey is set before attempting to use it
	if len(in.NodePubkey) == 0 {
		return fmt.Errorf("NodePubKey is not set")
	}

	// Parse the raw bytes of the node key into a pubkey object so we
	// can easily manipulate it.
	nodePubKey, err = btcec.ParsePubKey(in.NodePubkey, btcec.S256())
	if err != nil {
		return err
	}

	// Making a channel to ourselves wouldn't be of any use, so we
	// explicitly disallow them.
	if nodePubKey.IsEqual(r.server.identityPriv.PubKey()) {
		return fmt.Errorf("cannot open channel to self")
	}

	// Keep the compressed serialization around for logging below.
	nodePubKeyBytes = nodePubKey.SerializeCompressed()

	// Based on the passed fee related parameters, we'll determine an
	// appropriate fee rate for the funding transaction.
	feeRate, err := determineFeePerKw(
		r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
	)
	if err != nil {
		return err
	}

	rpcsLog.Debugf("[openchannel]: using fee of %v sat/kw for funding tx",
		int64(feeRate))

	// Instruct the server to trigger the necessary events to attempt to
	// open a new channel. A stream is returned in place, this stream will
	// be used to consume updates of the state of the pending channel.
	req := &openChanReq{
		targetPubkey:    nodePubKey,
		chainHash:       *activeNetParams.GenesisHash,
		localFundingAmt: localFundingAmt,
		pushAmt:         lnwire.NewMSatFromSatoshis(remoteInitialBalance),
		minHtlc:         minHtlc,
		fundingFeePerKw: feeRate,
		private:         in.Private,
		remoteCsvDelay:  remoteCsvDelay,
		minConfs:        in.MinConfs,
	}

	updateChan, errChan := r.server.OpenChannel(req)

	// Relay funding updates to the caller until either an error occurs,
	// the final ChanOpen update arrives, or the rpc server shuts down.
	var outpoint wire.OutPoint
out:
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
				nodePubKeyBytes, err)
			return err
		case fundingUpdate := <-updateChan:
			rpcsLog.Tracef("[openchannel] sending update: %v",
				fundingUpdate)
			if err := updateStream.Send(fundingUpdate); err != nil {
				return err
			}

			// If a final channel open update is being sent, then
			// we can break out of our recv loop as we no longer
			// need to process any further updates.
			switch update := fundingUpdate.Update.(type) {
			case *lnrpc.OpenStatusUpdate_ChanOpen:
				chanPoint := update.ChanOpen.ChannelPoint
				txidHash, err := getChanPointFundingTxid(chanPoint)
				if err != nil {
					return err
				}

				h, err := chainhash.NewHash(txidHash)
				if err != nil {
					return err
				}
				// Record the final outpoint for the success
				// log below.
				outpoint = wire.OutPoint{
					Hash:  *h,
					Index: chanPoint.OutputIndex,
				}

				break out
			}
		case <-r.quit:
			return nil
		}
	}

	rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)",
		nodePubKeyBytes, outpoint)
	return nil
}
|
|
|
|
|
2016-11-11 04:37:21 +03:00
|
|
|
// OpenChannelSync is a synchronous version of the OpenChannel RPC call. This
|
|
|
|
// call is meant to be consumed by clients to the REST proxy. As with all other
|
|
|
|
// sync calls, all byte slices are instead to be populated as hex encoded
|
|
|
|
// strings.
|
|
|
|
func (r *rpcServer) OpenChannelSync(ctx context.Context,
|
|
|
|
in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2018-02-20 02:01:23 +03:00
|
|
|
rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+
|
2018-02-14 08:48:42 +03:00
|
|
|
"allocation(us=%v, them=%v)", in.NodePubkeyString,
|
2017-07-31 00:25:03 +03:00
|
|
|
in.LocalFundingAmount, in.PushSat)
|
2016-11-11 04:37:21 +03:00
|
|
|
|
2017-08-03 06:59:43 +03:00
|
|
|
// We don't allow new channels to be open while the server is still
|
|
|
|
// syncing, as otherwise we may not be able to obtain the relevant
|
|
|
|
// notifications.
|
|
|
|
if !r.server.Started() {
|
|
|
|
return nil, fmt.Errorf("chain backend is still syncing, server " +
|
|
|
|
"not active yet")
|
|
|
|
}
|
|
|
|
|
2017-01-25 04:12:51 +03:00
|
|
|
// Creation of channels before the wallet syncs up is currently
|
|
|
|
// disallowed.
|
2017-12-10 10:42:46 +03:00
|
|
|
isSynced, _, err := r.server.cc.wallet.IsSynced()
|
2017-01-25 04:12:51 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if !isSynced {
|
|
|
|
return nil, errors.New("channels cannot be created before the " +
|
|
|
|
"wallet is fully synced")
|
|
|
|
}
|
|
|
|
|
2016-11-11 04:37:21 +03:00
|
|
|
// Decode the provided target node's public key, parsing it into a pub
|
|
|
|
// key object. For all sync call, byte slices are expected to be
|
|
|
|
// encoded as hex strings.
|
|
|
|
keyBytes, err := hex.DecodeString(in.NodePubkeyString)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
nodepubKey, err := btcec.ParsePubKey(keyBytes, btcec.S256())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
localFundingAmt := btcutil.Amount(in.LocalFundingAmount)
|
2017-01-10 06:05:11 +03:00
|
|
|
remoteInitialBalance := btcutil.Amount(in.PushSat)
|
2017-12-17 02:00:11 +03:00
|
|
|
minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat)
|
2018-03-14 16:15:44 +03:00
|
|
|
remoteCsvDelay := uint16(in.RemoteCsvDelay)
|
2017-01-10 06:05:11 +03:00
|
|
|
|
|
|
|
// Ensure that the initial balance of the remote party (if pushing
|
2017-12-30 17:44:31 +03:00
|
|
|
// satoshis) does not exceed the amount the local party has requested
|
2017-01-10 06:05:11 +03:00
|
|
|
// for funding.
|
|
|
|
if remoteInitialBalance >= localFundingAmt {
|
|
|
|
return nil, fmt.Errorf("amount pushed to remote peer for " +
|
|
|
|
"initial state must be below the local funding amount")
|
|
|
|
}
|
2016-11-11 04:37:21 +03:00
|
|
|
|
2018-03-19 02:58:39 +03:00
|
|
|
// Restrict the size of the channel we'll actually open. At a later
|
|
|
|
// level, we'll ensure that the output we create after accounting for
|
|
|
|
// fees that a dust output isn't created.
|
|
|
|
if localFundingAmt < minChanFundingSize {
|
|
|
|
return nil, fmt.Errorf("channel is too small, the minimum channel "+
|
|
|
|
"size is: %v SAT", int64(minChanFundingSize))
|
|
|
|
}
|
|
|
|
|
2018-08-10 05:44:25 +03:00
|
|
|
// Ensure that the MinConfs parameter is non-negative.
|
|
|
|
if in.MinConfs < 0 {
|
|
|
|
return nil, errors.New("minimum number of confirmations must " +
|
|
|
|
"be a non-negative number")
|
|
|
|
}
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// Based on the passed fee related parameters, we'll determine an
|
2017-12-30 17:44:31 +03:00
|
|
|
// appropriate fee rate for the funding transaction.
|
2018-07-28 04:39:38 +03:00
|
|
|
feeRate, err := determineFeePerKw(
|
2017-11-23 09:57:23 +03:00
|
|
|
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-07-28 04:39:38 +03:00
|
|
|
rpcsLog.Tracef("[openchannel] target sat/kw for funding tx: %v",
|
2018-02-13 17:13:01 +03:00
|
|
|
int64(feeRate))
|
2017-11-23 09:57:23 +03:00
|
|
|
|
2018-08-10 05:17:16 +03:00
|
|
|
req := &openChanReq{
|
|
|
|
targetPubkey: nodepubKey,
|
|
|
|
chainHash: *activeNetParams.GenesisHash,
|
|
|
|
localFundingAmt: localFundingAmt,
|
|
|
|
pushAmt: lnwire.NewMSatFromSatoshis(remoteInitialBalance),
|
|
|
|
minHtlc: minHtlc,
|
|
|
|
fundingFeePerKw: feeRate,
|
|
|
|
private: in.Private,
|
|
|
|
remoteCsvDelay: remoteCsvDelay,
|
2018-08-10 05:44:25 +03:00
|
|
|
minConfs: in.MinConfs,
|
2018-08-10 05:17:16 +03:00
|
|
|
}
|
2016-11-11 04:37:21 +03:00
|
|
|
|
2018-08-10 05:17:16 +03:00
|
|
|
updateChan, errChan := r.server.OpenChannel(req)
|
2016-11-11 04:37:21 +03:00
|
|
|
select {
|
|
|
|
// If an error occurs them immediately return the error to the client.
|
|
|
|
case err := <-errChan:
|
2018-02-20 02:01:23 +03:00
|
|
|
rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v",
|
2018-02-14 08:48:42 +03:00
|
|
|
nodepubKey, err)
|
2016-11-11 04:37:21 +03:00
|
|
|
return nil, err
|
|
|
|
|
|
|
|
// Otherwise, wait for the first channel update. The first update sent
|
|
|
|
// is when the funding transaction is broadcast to the network.
|
|
|
|
case fundingUpdate := <-updateChan:
|
|
|
|
rpcsLog.Tracef("[openchannel] sending update: %v",
|
|
|
|
fundingUpdate)
|
|
|
|
|
|
|
|
// Parse out the txid of the pending funding transaction. The
|
|
|
|
// sync client can use this to poll against the list of
|
|
|
|
// PendingChannels.
|
|
|
|
openUpdate := fundingUpdate.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
|
|
|
|
chanUpdate := openUpdate.ChanPending
|
|
|
|
|
|
|
|
return &lnrpc.ChannelPoint{
|
2018-01-11 07:59:30 +03:00
|
|
|
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
|
|
|
|
FundingTxidBytes: chanUpdate.Txid,
|
|
|
|
},
|
2016-11-11 04:37:21 +03:00
|
|
|
}, nil
|
|
|
|
case <-r.quit:
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-11 07:59:30 +03:00
|
|
|
// getChanPointFundingTxid returns the given channel point's funding txid in
|
|
|
|
// raw bytes.
|
|
|
|
func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) ([]byte, error) {
|
|
|
|
var txid []byte
|
|
|
|
|
|
|
|
// A channel point's funding txid can be get/set as a byte slice or a
|
|
|
|
// string. In the case it is a string, decode it.
|
|
|
|
switch chanPoint.GetFundingTxid().(type) {
|
|
|
|
case *lnrpc.ChannelPoint_FundingTxidBytes:
|
|
|
|
txid = chanPoint.GetFundingTxidBytes()
|
|
|
|
case *lnrpc.ChannelPoint_FundingTxidStr:
|
|
|
|
s := chanPoint.GetFundingTxidStr()
|
|
|
|
h, err := chainhash.NewHashFromStr(s)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
txid = h[:]
|
|
|
|
}
|
|
|
|
|
|
|
|
return txid, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CloseChannel attempts to close an active channel identified by its channel
// point. The actions of this method can additionally be augmented to attempt
// a force close after a timeout period in the case of an inactive peer.
func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
	updateStream lnrpc.Lightning_CloseChannelServer) error {

	// Reconstruct the channel's outpoint from the request: the funding
	// txid (bytes or string encoded) plus the output index.
	force := in.Force
	index := in.ChannelPoint.OutputIndex
	txidHash, err := getChanPointFundingTxid(in.GetChannelPoint())
	if err != nil {
		rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
		return err
	}
	txid, err := chainhash.NewHash(txidHash)
	if err != nil {
		rpcsLog.Errorf("[closechannel] invalid txid: %v", err)
		return err
	}
	chanPoint := wire.NewOutPoint(txid, index)

	rpcsLog.Tracef("[closechannel] request for ChannelPoint(%v), force=%v",
		chanPoint, force)

	// These channels are populated by either the force-close or the
	// cooperative-close branch below, then drained by the loop at the end.
	var (
		updateChan chan *lnrpc.CloseStatusUpdate
		errChan    chan error
	)

	// TODO(roasbeef): if force and peer online then don't force?

	// First, we'll fetch the channel as is, as we'll need to examine it
	// regardless of if this is a force close or not.
	channel, err := r.fetchActiveChannel(*chanPoint)
	if err != nil {
		return err
	}
	channel.Stop()

	// If a force closure was requested, then we'll handle all the details
	// around the creation and broadcast of the unilateral closure
	// transaction here rather than going to the switch as we don't require
	// interaction from the peer.
	if force {
		_, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
		if err != nil {
			return err
		}

		// As we're force closing this channel, as a precaution, we'll
		// ensure that the switch doesn't continue to see this channel
		// as eligible for forwarding HTLC's. If the peer is online,
		// then we'll also purge all of its indexes.
		remotePub := &channel.StateSnapshot().RemoteIdentity
		if peer, err := r.server.FindPeer(remotePub); err == nil {
			// TODO(roasbeef): actually get the active channel
			// instead too?
			//  * so only need to grab from database
			peer.WipeChannel(channel.ChannelPoint())
		} else {
			chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())
			r.server.htlcSwitch.RemoveLink(chanID)
		}

		// With the necessary indexes cleaned up, we'll now force close
		// the channel.
		chainArbitrator := r.server.chainArb
		closingTx, err := chainArbitrator.ForceCloseContract(
			*chanPoint,
		)
		if err != nil {
			rpcsLog.Errorf("unable to force close transaction: %v", err)
			return err
		}

		closingTxid := closingTx.TxHash()

		// With the transaction broadcast, we send our first update to
		// the client. The buffer of 2 leaves room for the final
		// ChanClose update sent from the confirmation callback below.
		updateChan = make(chan *lnrpc.CloseStatusUpdate, 2)
		updateChan <- &lnrpc.CloseStatusUpdate{
			Update: &lnrpc.CloseStatusUpdate_ClosePending{
				ClosePending: &lnrpc.PendingUpdate{
					Txid: closingTxid[:],
				},
			},
		}

		errChan = make(chan error, 1)
		notifier := r.server.cc.chainNotifier
		go waitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint,
			&closingTxid, closingTx.TxOut[0].PkScript, func() {
				// Respond to the local subsystem which
				// requested the channel closure.
				updateChan <- &lnrpc.CloseStatusUpdate{
					Update: &lnrpc.CloseStatusUpdate_ChanClose{
						ChanClose: &lnrpc.ChannelCloseUpdate{
							ClosingTxid: closingTxid[:],
							Success:     true,
						},
					},
				}
			})
	} else {
		// If the link is not known by the switch, we cannot gracefully close
		// the channel.
		channelID := lnwire.NewChanIDFromOutPoint(chanPoint)
		if _, err := r.server.htlcSwitch.GetLink(channelID); err != nil {
			rpcsLog.Debugf("Trying to non-force close offline channel with "+
				"chan_point=%v", chanPoint)
			return fmt.Errorf("unable to gracefully close channel while peer "+
				"is offline (try force closing it instead): %v", err)
		}

		// Based on the passed fee related parameters, we'll determine
		// an appropriate fee rate for the cooperative closure
		// transaction.
		feeRate, err := determineFeePerKw(
			r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
		)
		if err != nil {
			return err
		}

		rpcsLog.Debugf("Target sat/kw for closing transaction: %v",
			int64(feeRate))

		// Before we attempt the cooperative channel closure, we'll
		// examine the channel to ensure that it doesn't have a
		// lingering HTLC.
		if len(channel.ActiveHtlcs()) != 0 {
			return fmt.Errorf("cannot co-op close channel " +
				"with active htlcs")
		}

		// Otherwise, the caller has requested a regular interactive
		// cooperative channel closure. So we'll forward the request to
		// the htlc switch which will handle the negotiation and
		// broadcast details.
		updateChan, errChan = r.server.htlcSwitch.CloseLink(
			chanPoint, htlcswitch.CloseRegular, feeRate,
		)
	}

	// Stream closure updates back to the caller until the close completes,
	// an error occurs, or the rpc server shuts down.
out:
	for {
		select {
		case err := <-errChan:
			rpcsLog.Errorf("[closechannel] unable to close "+
				"ChannelPoint(%v): %v", chanPoint, err)
			return err
		case closingUpdate := <-updateChan:
			rpcsLog.Tracef("[closechannel] sending update: %v",
				closingUpdate)
			if err := updateStream.Send(closingUpdate); err != nil {
				return err
			}

			// If a final channel closing updates is being sent,
			// then we can break out of our dispatch loop as we no
			// longer need to process any further updates.
			switch closeUpdate := closingUpdate.Update.(type) {
			case *lnrpc.CloseStatusUpdate_ChanClose:
				h, _ := chainhash.NewHash(closeUpdate.ChanClose.ClosingTxid)
				rpcsLog.Infof("[closechannel] close completed: "+
					"txid(%v)", h)
				break out
			}
		case <-r.quit:
			return nil
		}
	}

	return nil
}
|
2016-01-02 07:27:40 +03:00
|
|
|
|
2018-02-07 06:13:07 +03:00
|
|
|
// fetchActiveChannel attempts to locate a channel identified by its channel
|
2017-01-04 03:02:51 +03:00
|
|
|
// point from the database's set of all currently opened channels.
|
|
|
|
func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (*lnwallet.LightningChannel, error) {
|
|
|
|
dbChannels, err := r.server.chanDB.FetchAllChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the channels fetched, attempt to locate the target channel
|
|
|
|
// according to its channel point.
|
|
|
|
var dbChan *channeldb.OpenChannel
|
|
|
|
for _, dbChannel := range dbChannels {
|
2017-07-31 00:25:03 +03:00
|
|
|
if dbChannel.FundingOutpoint == chanPoint {
|
2017-01-04 03:02:51 +03:00
|
|
|
dbChan = dbChannel
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the channel cannot be located, then we exit with an error to the
|
|
|
|
// caller.
|
|
|
|
if dbChan == nil {
|
|
|
|
return nil, fmt.Errorf("unable to find channel")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we create a fully populated channel state machine which
|
|
|
|
// uses the db channel as backing storage.
|
2018-01-17 07:03:28 +03:00
|
|
|
return lnwallet.NewLightningChannel(
|
2018-01-19 01:07:56 +03:00
|
|
|
r.server.cc.wallet.Cfg.Signer, nil, dbChan,
|
2018-01-17 07:03:28 +03:00
|
|
|
)
|
2017-01-04 03:02:51 +03:00
|
|
|
}
|
|
|
|
|
2017-07-28 02:39:49 +03:00
|
|
|
// GetInfo returns general information concerning the lightning node including
|
2018-02-07 06:13:07 +03:00
|
|
|
// its identity pubkey, alias, the chains it is connected to, and information
|
2017-07-28 02:39:49 +03:00
|
|
|
// concerning the number of open+pending channels.
|
2016-07-06 04:57:08 +03:00
|
|
|
func (r *rpcServer) GetInfo(ctx context.Context,
|
|
|
|
in *lnrpc.GetInfoRequest) (*lnrpc.GetInfoResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2016-07-06 04:57:08 +03:00
|
|
|
var activeChannels uint32
|
|
|
|
serverPeers := r.server.Peers()
|
|
|
|
for _, serverPeer := range serverPeers {
|
|
|
|
activeChannels += uint32(len(serverPeer.ChannelSnapshots()))
|
|
|
|
}
|
|
|
|
|
2017-08-09 04:01:15 +03:00
|
|
|
pendingChannels, err := r.server.chanDB.FetchPendingChannels()
|
2017-01-23 10:31:01 +03:00
|
|
|
if err != nil {
|
2017-08-09 04:01:15 +03:00
|
|
|
return nil, fmt.Errorf("unable to get retrieve pending "+
|
2017-06-17 01:11:02 +03:00
|
|
|
"channels: %v", err)
|
2017-01-23 10:31:01 +03:00
|
|
|
}
|
2017-08-09 04:01:15 +03:00
|
|
|
nPendingChannels := uint32(len(pendingChannels))
|
2017-01-23 10:31:01 +03:00
|
|
|
|
2016-07-06 04:57:08 +03:00
|
|
|
idPub := r.server.identityPriv.PubKey().SerializeCompressed()
|
2018-01-07 08:54:14 +03:00
|
|
|
encodedIDPub := hex.EncodeToString(idPub)
|
2016-07-06 04:57:08 +03:00
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
bestHash, bestHeight, err := r.server.cc.chainIO.GetBestBlock()
|
2016-12-13 02:34:47 +03:00
|
|
|
if err != nil {
|
2017-05-02 22:31:35 +03:00
|
|
|
return nil, fmt.Errorf("unable to get best block info: %v", err)
|
2016-12-13 02:34:47 +03:00
|
|
|
}
|
|
|
|
|
2017-12-10 10:42:46 +03:00
|
|
|
isSynced, bestHeaderTimestamp, err := r.server.cc.wallet.IsSynced()
|
2016-12-13 02:34:47 +03:00
|
|
|
if err != nil {
|
2017-05-15 05:20:26 +03:00
|
|
|
return nil, fmt.Errorf("unable to sync PoV of the wallet "+
|
|
|
|
"with current best block in the main chain: %v", err)
|
2016-12-13 02:34:47 +03:00
|
|
|
}
|
|
|
|
|
2017-05-03 05:51:33 +03:00
|
|
|
activeChains := make([]string, registeredChains.NumActiveChains())
|
|
|
|
for i, chain := range registeredChains.ActiveChains() {
|
|
|
|
activeChains[i] = chain.String()
|
|
|
|
}
|
2017-06-06 01:18:06 +03:00
|
|
|
|
2018-01-07 08:54:14 +03:00
|
|
|
// Check if external IP addresses were provided to lnd and use them
|
|
|
|
// to set the URIs.
|
|
|
|
nodeAnn, err := r.server.genNodeAnnouncement(false)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to retrieve current fully signed "+
|
|
|
|
"node announcement: %v", err)
|
|
|
|
}
|
|
|
|
addrs := nodeAnn.Addresses
|
|
|
|
uris := make([]string, len(addrs))
|
|
|
|
for i, addr := range addrs {
|
|
|
|
uris[i] = fmt.Sprintf("%s@%s", encodedIDPub, addr.String())
|
|
|
|
}
|
|
|
|
|
2017-01-15 05:16:53 +03:00
|
|
|
// TODO(roasbeef): add synced height n stuff
|
2016-07-06 04:57:08 +03:00
|
|
|
return &lnrpc.GetInfoResponse{
|
2017-12-10 10:42:46 +03:00
|
|
|
IdentityPubkey: encodedIDPub,
|
|
|
|
NumPendingChannels: nPendingChannels,
|
|
|
|
NumActiveChannels: activeChannels,
|
|
|
|
NumPeers: uint32(len(serverPeers)),
|
|
|
|
BlockHeight: uint32(bestHeight),
|
|
|
|
BlockHash: bestHash.String(),
|
|
|
|
SyncedToChain: isSynced,
|
2018-03-24 12:28:20 +03:00
|
|
|
Testnet: isTestnet(&activeNetParams),
|
2017-12-10 10:42:46 +03:00
|
|
|
Chains: activeChains,
|
|
|
|
Uris: uris,
|
|
|
|
Alias: nodeAnn.Alias.String(),
|
|
|
|
BestHeaderTimestamp: int64(bestHeaderTimestamp),
|
2018-04-04 06:40:44 +03:00
|
|
|
Version: version(),
|
2016-07-06 04:57:08 +03:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// ListPeers returns a verbose listing of all currently active peers.
|
|
|
|
func (r *rpcServer) ListPeers(ctx context.Context,
|
|
|
|
in *lnrpc.ListPeersRequest) (*lnrpc.ListPeersResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
rpcsLog.Tracef("[listpeers] request")
|
2016-06-21 21:52:09 +03:00
|
|
|
|
|
|
|
serverPeers := r.server.Peers()
|
|
|
|
resp := &lnrpc.ListPeersResponse{
|
|
|
|
Peers: make([]*lnrpc.Peer, 0, len(serverPeers)),
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, serverPeer := range serverPeers {
|
2017-01-26 05:24:59 +03:00
|
|
|
var (
|
|
|
|
satSent int64
|
|
|
|
satRecv int64
|
|
|
|
)
|
|
|
|
|
|
|
|
// In order to display the total number of satoshis of outbound
|
|
|
|
// (sent) and inbound (recv'd) satoshis that have been
|
|
|
|
// transported through this peer, we'll sum up the sent/recv'd
|
2017-05-18 21:55:25 +03:00
|
|
|
// values for each of the active channels we have with the
|
2017-01-26 05:24:59 +03:00
|
|
|
// peer.
|
|
|
|
chans := serverPeer.ChannelSnapshots()
|
|
|
|
for _, c := range chans {
|
2017-11-11 02:24:49 +03:00
|
|
|
satSent += int64(c.TotalMSatSent.ToSatoshis())
|
|
|
|
satRecv += int64(c.TotalMSatReceived.ToSatoshis())
|
2017-01-26 05:24:59 +03:00
|
|
|
}
|
|
|
|
|
2016-10-28 05:49:10 +03:00
|
|
|
nodePub := serverPeer.addr.IdentityKey.SerializeCompressed()
|
2016-06-21 21:52:09 +03:00
|
|
|
peer := &lnrpc.Peer{
|
2016-10-27 00:31:41 +03:00
|
|
|
PubKey: hex.EncodeToString(nodePub),
|
|
|
|
Address: serverPeer.conn.RemoteAddr().String(),
|
2018-04-03 08:16:10 +03:00
|
|
|
Inbound: serverPeer.inbound,
|
2016-10-27 00:31:41 +03:00
|
|
|
BytesRecv: atomic.LoadUint64(&serverPeer.bytesReceived),
|
|
|
|
BytesSent: atomic.LoadUint64(&serverPeer.bytesSent),
|
2017-01-26 05:24:59 +03:00
|
|
|
SatSent: satSent,
|
|
|
|
SatRecv: satRecv,
|
|
|
|
PingTime: serverPeer.PingTime(),
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
resp.Peers = append(resp.Peers, peer)
|
|
|
|
}
|
|
|
|
|
2016-06-29 21:31:29 +03:00
|
|
|
rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)
|
2016-06-21 21:52:09 +03:00
|
|
|
|
|
|
|
return resp, nil
|
|
|
|
}
|
|
|
|
|
2017-11-26 16:07:55 +03:00
|
|
|
// WalletBalance returns total unspent outputs(confirmed and unconfirmed), all
|
|
|
|
// confirmed unspent outputs and all unconfirmed unspent outputs under control
|
2016-06-21 21:46:27 +03:00
|
|
|
// by the wallet. This method can be modified by having the request specify
|
|
|
|
// only witness outputs should be factored into the final output sum.
|
2016-08-31 02:52:53 +03:00
|
|
|
// TODO(roasbeef): add async hooks into wallet balance changes
|
2016-06-21 21:46:27 +03:00
|
|
|
func (r *rpcServer) WalletBalance(ctx context.Context,
|
|
|
|
in *lnrpc.WalletBalanceRequest) (*lnrpc.WalletBalanceResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-11-26 16:07:55 +03:00
|
|
|
// Get total balance, from txs that have >= 0 confirmations.
|
2018-02-18 02:38:06 +03:00
|
|
|
totalBal, err := r.server.cc.wallet.ConfirmedBalance(0)
|
2016-08-13 01:53:18 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-06-21 21:46:27 +03:00
|
|
|
}
|
|
|
|
|
2017-11-26 16:07:55 +03:00
|
|
|
// Get confirmed balance, from txs that have >= 1 confirmations.
|
2018-02-18 02:38:06 +03:00
|
|
|
confirmedBal, err := r.server.cc.wallet.ConfirmedBalance(1)
|
2017-11-26 16:07:55 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// Get unconfirmed balance, from txs with 0 confirmations.
|
2017-11-26 16:07:55 +03:00
|
|
|
unconfirmedBal := totalBal - confirmedBal
|
|
|
|
|
|
|
|
rpcsLog.Debugf("[walletbalance] Total balance=%v", totalBal)
|
2016-06-21 21:46:27 +03:00
|
|
|
|
2017-02-10 02:28:32 +03:00
|
|
|
return &lnrpc.WalletBalanceResponse{
|
2017-11-26 16:07:55 +03:00
|
|
|
TotalBalance: int64(totalBal),
|
|
|
|
ConfirmedBalance: int64(confirmedBal),
|
|
|
|
UnconfirmedBalance: int64(unconfirmedBal),
|
2017-02-10 02:28:32 +03:00
|
|
|
}, nil
|
2015-12-31 08:40:41 +03:00
|
|
|
}
|
2016-07-08 01:33:52 +03:00
|
|
|
|
2016-09-15 22:24:52 +03:00
|
|
|
// ChannelBalance returns the total available channel flow across all open
|
|
|
|
// channels in satoshis.
|
2016-09-15 21:59:51 +03:00
|
|
|
func (r *rpcServer) ChannelBalance(ctx context.Context,
|
|
|
|
in *lnrpc.ChannelBalanceRequest) (*lnrpc.ChannelBalanceResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2018-03-27 15:25:46 +03:00
|
|
|
openChannels, err := r.server.chanDB.FetchAllOpenChannels()
|
2016-11-12 02:48:15 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-03-27 15:25:46 +03:00
|
|
|
var balance btcutil.Amount
|
|
|
|
for _, channel := range openChannels {
|
|
|
|
balance += channel.LocalCommitment.LocalBalance.ToSatoshis()
|
|
|
|
}
|
|
|
|
|
|
|
|
pendingChannels, err := r.server.chanDB.FetchPendingChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var pendingOpenBalance btcutil.Amount
|
|
|
|
for _, channel := range pendingChannels {
|
|
|
|
pendingOpenBalance += channel.LocalCommitment.LocalBalance.ToSatoshis()
|
2016-09-15 21:59:51 +03:00
|
|
|
}
|
|
|
|
|
2018-04-01 02:40:50 +03:00
|
|
|
return &lnrpc.ChannelBalanceResponse{
|
|
|
|
Balance: int64(balance),
|
|
|
|
PendingOpenBalance: int64(pendingOpenBalance),
|
|
|
|
}, nil
|
2016-09-15 21:59:51 +03:00
|
|
|
}
|
|
|
|
|
// PendingChannels returns a list of all the channels that are currently
// considered "pending". A channel is pending if it has finished the funding
// workflow and is waiting for confirmations for the funding txn, or is in the
// process of closure, either initiated cooperatively or non-cooperatively.
func (r *rpcServer) PendingChannels(ctx context.Context,
	in *lnrpc.PendingChannelsRequest) (*lnrpc.PendingChannelsResponse, error) {

	rpcsLog.Debugf("[pendingchannels]")

	resp := &lnrpc.PendingChannelsResponse{}

	// First, we'll populate the response with all the channels that are
	// soon to be opened. We can easily fetch this data from the database
	// and map the db struct to the proto response.
	pendingOpenChannels, err := r.server.chanDB.FetchPendingChannels()
	if err != nil {
		rpcsLog.Errorf("unable to fetch pending channels: %v", err)
		return nil, err
	}
	resp.PendingOpenChannels = make([]*lnrpc.PendingChannelsResponse_PendingOpenChannel,
		len(pendingOpenChannels))
	for i, pendingChan := range pendingOpenChannels {
		pub := pendingChan.IdentityPub.SerializeCompressed()

		// As this is required for display purposes, we'll calculate
		// the weight of the commitment transaction. We also add on the
		// estimated weight of the witness to calculate the weight of
		// the transaction if it were to be immediately unilaterally
		// broadcast.
		// TODO(roasbeef): query for funding tx from wallet, display
		// that also?
		localCommitment := pendingChan.LocalCommitment
		utx := btcutil.NewTx(localCommitment.CommitTx)
		commitBaseWeight := blockchain.GetTransactionWeight(utx)
		commitWeight := commitBaseWeight + lnwallet.WitnessCommitmentTxWeight

		resp.PendingOpenChannels[i] = &lnrpc.PendingChannelsResponse_PendingOpenChannel{
			Channel: &lnrpc.PendingChannelsResponse_PendingChannel{
				RemoteNodePub: hex.EncodeToString(pub),
				ChannelPoint:  pendingChan.FundingOutpoint.String(),
				Capacity:      int64(pendingChan.Capacity),
				LocalBalance:  int64(localCommitment.LocalBalance.ToSatoshis()),
				RemoteBalance: int64(localCommitment.RemoteBalance.ToSatoshis()),
			},
			CommitWeight: commitWeight,
			CommitFee:    int64(localCommitment.CommitFee),
			FeePerKw:     int64(localCommitment.FeePerKw),
			// TODO(roasbeef): need to track confirmation height
		}
	}

	// The current best block height is needed below to translate absolute
	// maturity heights into "blocks remaining" figures.
	_, currentHeight, err := r.server.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, err
	}

	// Next, we'll examine the channels that are soon to be closed so we
	// can populate these fields within the response.
	pendingCloseChannels, err := r.server.chanDB.FetchClosedChannels(true)
	if err != nil {
		rpcsLog.Errorf("unable to fetch closed channels: %v", err)
		return nil, err
	}
	for _, pendingClose := range pendingCloseChannels {
		// First construct the channel struct itself, this will be
		// needed regardless of how this channel was closed.
		pub := pendingClose.RemotePub.SerializeCompressed()
		chanPoint := pendingClose.ChanPoint
		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
			RemoteNodePub: hex.EncodeToString(pub),
			ChannelPoint:  chanPoint.String(),
			Capacity:      int64(pendingClose.Capacity),
			LocalBalance:  int64(pendingClose.SettledBalance),
		}

		closeTXID := pendingClose.ClosingTXID.String()

		switch pendingClose.CloseType {

		// If the channel was closed cooperatively, then we'll only
		// need to tack on the closing txid.
		// TODO(halseth): remove. After recent changes, a coop closed
		// channel should never be in the "pending close" state.
		// Keeping for now to let someone that upgraded in the middle
		// of a close let their closing tx confirm.
		case channeldb.CooperativeClose:
			resp.PendingClosingChannels = append(
				resp.PendingClosingChannels,
				&lnrpc.PendingChannelsResponse_ClosedChannel{
					Channel:     channel,
					ClosingTxid: closeTXID,
				},
			)

			resp.TotalLimboBalance += channel.LocalBalance

		// If the channel was force closed, then we'll need to query
		// the utxoNursery for additional information.
		// TODO(halseth): distinguish remote and local case?
		case channeldb.LocalForceClose, channeldb.RemoteForceClose:
			forceClose := &lnrpc.PendingChannelsResponse_ForceClosedChannel{
				Channel:     channel,
				ClosingTxid: closeTXID,
			}

			// Query for the maturity state for this force closed
			// channel. If we didn't have any time-locked outputs,
			// then the nursery may not know of the contract.
			nurseryInfo, err := r.server.utxoNursery.NurseryReport(&chanPoint)
			if err != nil && err != ErrContractNotFound {
				return nil, fmt.Errorf("unable to obtain "+
					"nursery report for ChannelPoint(%v): %v",
					chanPoint, err)
			}

			// If the nursery knows of this channel, then we can
			// populate information detailing exactly how much
			// funds are time locked and also the height in which
			// we can ultimately sweep the funds into the wallet.
			if nurseryInfo != nil {
				forceClose.LimboBalance = int64(nurseryInfo.limboBalance)
				forceClose.RecoveredBalance = int64(nurseryInfo.recoveredBalance)
				forceClose.MaturityHeight = nurseryInfo.maturityHeight

				// If the transaction has been confirmed, then
				// we can compute how many blocks it has left.
				if forceClose.MaturityHeight != 0 {
					forceClose.BlocksTilMaturity =
						int32(forceClose.MaturityHeight) -
							currentHeight
				}

				for _, htlcReport := range nurseryInfo.htlcs {
					// TODO(conner) set incoming flag
					// appropriately after handling incoming
					// incubation
					htlc := &lnrpc.PendingHTLC{
						Incoming:       false,
						Amount:         int64(htlcReport.amount),
						Outpoint:       htlcReport.outpoint.String(),
						MaturityHeight: htlcReport.maturityHeight,
						Stage:          htlcReport.stage,
					}

					// As above, a zero maturity height
					// means the HTLC's sweep height isn't
					// known yet, so no countdown is set.
					if htlc.MaturityHeight != 0 {
						htlc.BlocksTilMaturity =
							int32(htlc.MaturityHeight) -
								currentHeight
					}

					forceClose.PendingHtlcs = append(forceClose.PendingHtlcs,
						htlc)
				}

				resp.TotalLimboBalance += int64(nurseryInfo.limboBalance)
			}

			resp.PendingForceClosingChannels = append(
				resp.PendingForceClosingChannels,
				forceClose,
			)
		}
	}

	// We'll also fetch all channels that are open, but have had their
	// commitment broadcasted, meaning they are waiting for the closing
	// transaction to confirm.
	waitingCloseChans, err := r.server.chanDB.FetchWaitingCloseChannels()
	if err != nil {
		rpcsLog.Errorf("unable to fetch channels waiting close: %v",
			err)
		return nil, err
	}

	for _, waitingClose := range waitingCloseChans {
		pub := waitingClose.IdentityPub.SerializeCompressed()
		chanPoint := waitingClose.FundingOutpoint
		channel := &lnrpc.PendingChannelsResponse_PendingChannel{
			RemoteNodePub: hex.EncodeToString(pub),
			ChannelPoint:  chanPoint.String(),
			Capacity:      int64(waitingClose.Capacity),
			LocalBalance:  int64(waitingClose.LocalCommitment.LocalBalance.ToSatoshis()),
		}

		// A close tx has been broadcasted, all our balance will be in
		// limbo until it confirms.
		resp.WaitingCloseChannels = append(
			resp.WaitingCloseChannels,
			&lnrpc.PendingChannelsResponse_WaitingCloseChannel{
				Channel:      channel,
				LimboBalance: channel.LocalBalance,
			},
		)

		resp.TotalLimboBalance += channel.LocalBalance
	}

	return resp, nil
}
|
2016-07-13 03:46:25 +03:00
|
|
|
|
2018-06-15 03:14:31 +03:00
|
|
|
// ClosedChannels returns a list of all the channels have been closed.
|
2018-05-24 12:35:34 +03:00
|
|
|
// This does not include channels that are still in the process of closing.
|
|
|
|
func (r *rpcServer) ClosedChannels(ctx context.Context,
|
2018-06-15 03:14:31 +03:00
|
|
|
in *lnrpc.ClosedChannelsRequest) (*lnrpc.ClosedChannelsResponse,
|
2018-05-24 12:35:34 +03:00
|
|
|
error) {
|
|
|
|
|
|
|
|
// Show all channels when no filter flags are set.
|
2018-06-15 03:14:31 +03:00
|
|
|
filterResults := in.Cooperative || in.LocalForce ||
|
2018-05-24 12:35:34 +03:00
|
|
|
in.RemoteForce || in.Breach || in.FundingCanceled
|
|
|
|
|
|
|
|
resp := &lnrpc.ClosedChannelsResponse{}
|
|
|
|
|
|
|
|
dbChannels, err := r.server.chanDB.FetchClosedChannels(false)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-06-15 03:14:31 +03:00
|
|
|
// In order to make the response easier to parse for clients, we'll
|
|
|
|
// sort the set of closed channels by their closing height before
|
|
|
|
// serializing the proto response.
|
|
|
|
sort.Slice(dbChannels, func(i, j int) bool {
|
|
|
|
return dbChannels[i].CloseHeight < dbChannels[j].CloseHeight
|
|
|
|
})
|
|
|
|
|
2018-05-24 12:35:34 +03:00
|
|
|
for _, dbChannel := range dbChannels {
|
|
|
|
if dbChannel.IsPending {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
nodePub := dbChannel.RemotePub
|
|
|
|
nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
|
|
|
|
|
|
|
|
var closeType lnrpc.ChannelCloseSummary_ClosureType
|
|
|
|
switch dbChannel.CloseType {
|
|
|
|
case channeldb.CooperativeClose:
|
|
|
|
if filterResults && !in.Cooperative {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE
|
|
|
|
case channeldb.LocalForceClose:
|
|
|
|
if filterResults && !in.LocalForce {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
closeType = lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE
|
|
|
|
case channeldb.RemoteForceClose:
|
|
|
|
if filterResults && !in.RemoteForce {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
closeType = lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE
|
|
|
|
case channeldb.BreachClose:
|
|
|
|
if filterResults && !in.Breach {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
closeType = lnrpc.ChannelCloseSummary_BREACH_CLOSE
|
|
|
|
case channeldb.FundingCanceled:
|
|
|
|
if filterResults && !in.FundingCanceled {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
closeType = lnrpc.ChannelCloseSummary_FUNDING_CANCELED
|
|
|
|
}
|
|
|
|
|
|
|
|
channel := &lnrpc.ChannelCloseSummary{
|
|
|
|
Capacity: int64(dbChannel.Capacity),
|
|
|
|
RemotePubkey: nodeID,
|
|
|
|
CloseHeight: dbChannel.CloseHeight,
|
|
|
|
CloseType: closeType,
|
|
|
|
ChannelPoint: dbChannel.ChanPoint.String(),
|
|
|
|
ChanId: dbChannel.ShortChanID.ToUint64(),
|
|
|
|
SettledBalance: int64(dbChannel.SettledBalance),
|
|
|
|
TimeLockedBalance: int64(dbChannel.TimeLockedBalance),
|
|
|
|
ChainHash: dbChannel.ChainHash.String(),
|
|
|
|
ClosingTxHash: dbChannel.ClosingTXID.String(),
|
|
|
|
}
|
|
|
|
|
|
|
|
resp.Channels = append(resp.Channels, channel)
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, nil
|
|
|
|
}
|
|
|
|
|
// ListChannels returns a description of all the open channels that this node
// is a participant in.
func (r *rpcServer) ListChannels(ctx context.Context,
	in *lnrpc.ListChannelsRequest) (*lnrpc.ListChannelsResponse, error) {

	// The active/inactive and public/private filter pairs are mutually
	// exclusive; reject contradictory combinations up front.
	if in.ActiveOnly && in.InactiveOnly {
		return nil, fmt.Errorf("either `active_only` or " +
			"`inactive_only` can be set, but not both")
	}

	if in.PublicOnly && in.PrivateOnly {
		return nil, fmt.Errorf("either `public_only` or " +
			"`private_only` can be set, but not both")
	}

	resp := &lnrpc.ListChannelsResponse{}

	graph := r.server.chanDB.ChannelGraph()

	dbChannels, err := r.server.chanDB.FetchAllOpenChannels()
	if err != nil {
		return nil, err
	}

	rpcsLog.Infof("[listchannels] fetched %v channels from DB",
		len(dbChannels))

	for _, dbChannel := range dbChannels {
		nodePub := dbChannel.IdentityPub
		nodeID := hex.EncodeToString(nodePub.SerializeCompressed())
		chanPoint := dbChannel.FundingOutpoint

		// With the channel point known, retrieve the network channel
		// ID from the database. The lookup error is deliberately
		// ignored: an unannounced channel simply reports a zero ID.
		var chanID uint64
		chanID, _ = graph.ChannelID(&chanPoint)

		var peerOnline bool
		if _, err := r.server.FindPeer(nodePub); err == nil {
			peerOnline = true
		}

		channelID := lnwire.NewChanIDFromOutPoint(&chanPoint)
		var linkActive bool
		if link, err := r.server.htlcSwitch.GetLink(channelID); err == nil {
			// A channel is only considered active if it is known
			// by the switch *and* able to forward
			// incoming/outgoing payments.
			linkActive = link.EligibleToForward()
		}

		// Next, we'll determine whether we should add this channel to
		// our list depending on the type of channels requested to us.
		isActive := peerOnline && linkActive
		isPublic := dbChannel.ChannelFlags&lnwire.FFAnnounceChannel != 0

		// We'll only skip returning this channel if we were requested
		// for a specific kind and this channel doesn't satisfy it.
		switch {
		case in.ActiveOnly && !isActive:
			continue
		case in.InactiveOnly && isActive:
			continue
		case in.PublicOnly && !isPublic:
			continue
		case in.PrivateOnly && isPublic:
			continue
		}

		// As this is required for display purposes, we'll calculate
		// the weight of the commitment transaction. We also add on the
		// estimated weight of the witness to calculate the weight of
		// the transaction if it were to be immediately unilaterally
		// broadcast.
		localCommit := dbChannel.LocalCommitment
		utx := btcutil.NewTx(localCommit.CommitTx)
		commitBaseWeight := blockchain.GetTransactionWeight(utx)
		commitWeight := commitBaseWeight + lnwallet.WitnessCommitmentTxWeight

		localBalance := localCommit.LocalBalance
		remoteBalance := localCommit.RemoteBalance

		// As an artifact of our usage of mSAT internally, either party
		// may end up in a state where they're holding a fractional
		// amount of satoshis which can't be expressed within the
		// actual commitment output. Since we round down when going
		// from mSAT -> SAT, we may at any point be adding an
		// additional SAT to miners fees. As a result, we display a
		// commitment fee that accounts for this externally.
		var sumOutputs btcutil.Amount
		for _, txOut := range localCommit.CommitTx.TxOut {
			sumOutputs += btcutil.Amount(txOut.Value)
		}
		externalCommitFee := dbChannel.Capacity - sumOutputs

		channel := &lnrpc.Channel{
			Active:                isActive,
			Private:               !isPublic,
			RemotePubkey:          nodeID,
			ChannelPoint:          chanPoint.String(),
			ChanId:                chanID,
			Capacity:              int64(dbChannel.Capacity),
			LocalBalance:          int64(localBalance.ToSatoshis()),
			RemoteBalance:         int64(remoteBalance.ToSatoshis()),
			CommitFee:             int64(externalCommitFee),
			CommitWeight:          commitWeight,
			FeePerKw:              int64(localCommit.FeePerKw),
			TotalSatoshisSent:     int64(dbChannel.TotalMSatSent.ToSatoshis()),
			TotalSatoshisReceived: int64(dbChannel.TotalMSatReceived.ToSatoshis()),
			NumUpdates:            localCommit.CommitHeight,
			PendingHtlcs:          make([]*lnrpc.HTLC, len(localCommit.Htlcs)),
			CsvDelay:              uint32(dbChannel.LocalChanCfg.CsvDelay),
		}

		for i, htlc := range localCommit.Htlcs {
			// Copy the hash into a fresh array so the response
			// does not alias the commitment's backing storage.
			var rHash [32]byte
			copy(rHash[:], htlc.RHash[:])
			channel.PendingHtlcs[i] = &lnrpc.HTLC{
				Incoming:         htlc.Incoming,
				Amount:           int64(htlc.Amt.ToSatoshis()),
				HashLock:         rHash[:],
				ExpirationHeight: htlc.RefundTimeout,
			}
		}

		resp.Channels = append(resp.Channels, channel)
	}

	return resp, nil
}
|
|
|
|
|
2016-12-31 03:41:59 +03:00
|
|
|
// savePayment saves a successfully completed payment to the database for
|
|
|
|
// historical record keeping.
|
2018-06-07 06:40:28 +03:00
|
|
|
func (r *rpcServer) savePayment(route *routing.Route,
|
|
|
|
amount lnwire.MilliSatoshi, preImage []byte) error {
|
2016-12-21 12:19:01 +03:00
|
|
|
|
2016-12-31 03:41:59 +03:00
|
|
|
paymentPath := make([][33]byte, len(route.Hops))
|
2016-12-27 08:51:18 +03:00
|
|
|
for i, hop := range route.Hops {
|
2018-01-31 07:30:00 +03:00
|
|
|
hopPub := hop.Channel.Node.PubKeyBytes
|
|
|
|
copy(paymentPath[i][:], hopPub[:])
|
2016-12-31 03:41:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
payment := &channeldb.OutgoingPayment{
|
|
|
|
Invoice: channeldb.Invoice{
|
|
|
|
Terms: channeldb.ContractTerm{
|
2017-02-23 22:07:01 +03:00
|
|
|
Value: amount,
|
2016-12-31 03:41:59 +03:00
|
|
|
},
|
|
|
|
CreationDate: time.Now(),
|
|
|
|
},
|
|
|
|
Path: paymentPath,
|
|
|
|
Fee: route.TotalFees,
|
|
|
|
TimeLockLength: route.TotalTimeLock,
|
2016-12-05 14:59:36 +03:00
|
|
|
}
|
2017-12-14 04:04:18 +03:00
|
|
|
copy(payment.PaymentPreimage[:], preImage)
|
2016-12-31 03:41:59 +03:00
|
|
|
|
|
|
|
return r.server.chanDB.AddPayment(payment)
|
2016-12-05 14:59:36 +03:00
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// validatePayReqExpiry checks if the passed payment request has expired. In
|
|
|
|
// the case it has expired, an error will be returned.
|
|
|
|
func validatePayReqExpiry(payReq *zpay32.Invoice) error {
|
|
|
|
expiry := payReq.Expiry()
|
|
|
|
validUntil := payReq.Timestamp.Add(expiry)
|
|
|
|
if time.Now().After(validUntil) {
|
|
|
|
return fmt.Errorf("invoice expired. Valid until %v", validUntil)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
// paymentStream enables different types of payment streams, such as:
// lnrpc.Lightning_SendPaymentServer and lnrpc.Lightning_SendToRouteServer to
// execute sendPayment. We use this struct as a sort of bridge to enable code
// re-use between SendPayment and SendToRoute.
type paymentStream struct {
	// recv pulls the next request off the underlying gRPC stream,
	// normalized into an rpcPaymentRequest.
	recv func() (*rpcPaymentRequest, error)

	// send writes a payment result back to the client over the
	// underlying gRPC stream.
	send func(*lnrpc.SendResponse) error
}
|
|
|
|
|
// rpcPaymentRequest wraps lnrpc.SendRequest so that routes from
// lnrpc.SendToRouteRequest can be passed to sendPayment.
type rpcPaymentRequest struct {
	*lnrpc.SendRequest

	// routes is the set of fully specified routes to attempt; it is
	// populated only for requests that originate from SendToRoute.
	routes []*routing.Route
}
|
|
|
|
|
2018-04-19 17:27:37 +03:00
|
|
|
// calculateFeeLimit returns the fee limit in millisatoshis. If a percentage
|
|
|
|
// based fee limit has been requested, we'll factor in the ratio provided with
|
|
|
|
// the amount of the payment.
|
|
|
|
func calculateFeeLimit(feeLimit *lnrpc.FeeLimit,
|
|
|
|
amount lnwire.MilliSatoshi) lnwire.MilliSatoshi {
|
|
|
|
|
|
|
|
switch feeLimit.GetLimit().(type) {
|
|
|
|
case *lnrpc.FeeLimit_Fixed:
|
|
|
|
return lnwire.NewMSatFromSatoshis(
|
|
|
|
btcutil.Amount(feeLimit.GetFixed()),
|
|
|
|
)
|
|
|
|
case *lnrpc.FeeLimit_Percent:
|
|
|
|
return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100
|
|
|
|
default:
|
|
|
|
// If a fee limit was not specified, we'll use the payment's
|
|
|
|
// amount as an upper bound in order to avoid payment attempts
|
|
|
|
// from incurring fees higher than the payment amount itself.
|
|
|
|
return amount
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-13 03:46:25 +03:00
|
|
|
// SendPayment dispatches a bi-directional streaming RPC for sending payments
|
|
|
|
// through the Lightning Network. A single RPC invocation creates a persistent
|
|
|
|
// bi-directional stream allowing clients to rapidly send payments through the
|
|
|
|
// Lightning Network with a single persistent connection.
|
2018-05-01 11:17:55 +03:00
|
|
|
func (r *rpcServer) SendPayment(stream lnrpc.Lightning_SendPaymentServer) error {
|
|
|
|
return r.sendPayment(&paymentStream{
|
2018-06-07 06:40:28 +03:00
|
|
|
recv: func() (*rpcPaymentRequest, error) {
|
2018-05-01 11:17:55 +03:00
|
|
|
req, err := stream.Recv()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
return &rpcPaymentRequest{
|
2018-05-01 11:17:55 +03:00
|
|
|
SendRequest: req,
|
|
|
|
}, nil
|
|
|
|
},
|
|
|
|
send: stream.Send,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// SendToRoute dispatches a bi-directional streaming RPC for sending payments
|
|
|
|
// through the Lightning Network via predefined routes passed in. A single RPC
|
|
|
|
// invocation creates a persistent bi-directional stream allowing clients to
|
2018-06-07 06:40:28 +03:00
|
|
|
// rapidly send payments through the Lightning Network with a single persistent
|
|
|
|
// connection.
|
2018-05-01 11:17:55 +03:00
|
|
|
func (r *rpcServer) SendToRoute(stream lnrpc.Lightning_SendToRouteServer) error {
|
|
|
|
return r.sendPayment(&paymentStream{
|
2018-06-07 06:40:28 +03:00
|
|
|
recv: func() (*rpcPaymentRequest, error) {
|
2018-05-01 11:17:55 +03:00
|
|
|
req, err := stream.Recv()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
|
|
|
|
|
|
|
if len(req.Routes) == 0 {
|
|
|
|
return nil, fmt.Errorf("unable to send, no routes provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
routes := make([]*routing.Route, len(req.Routes))
|
|
|
|
for i, rpcroute := range req.Routes {
|
|
|
|
route, err := unmarshallRoute(rpcroute, graph)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
routes[i] = route
|
|
|
|
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
return &rpcPaymentRequest{
|
2018-05-01 11:17:55 +03:00
|
|
|
SendRequest: &lnrpc.SendRequest{
|
|
|
|
PaymentHash: req.PaymentHash,
|
|
|
|
},
|
|
|
|
routes: routes,
|
|
|
|
}, nil
|
|
|
|
},
|
|
|
|
send: stream.Send,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
// rpcPaymentIntent is a small wrapper struct around the of values we can
// receive from a client over RPC if they wish to send a payment. We'll either
// extract these fields from a payment request (which may include routing
// hints), or we'll get a fully populated route from the user that we'll pass
// directly to the channel router for dispatching.
type rpcPaymentIntent struct {
	// msat is the amount to send, in millisatoshis.
	msat lnwire.MilliSatoshi

	// feeLimit caps the total routing fees the payment may incur, in
	// millisatoshis.
	feeLimit lnwire.MilliSatoshi

	// dest is the public key of the payment's final destination.
	dest *btcec.PublicKey

	// rHash is the payment hash for the HTLCs of this payment.
	rHash [32]byte

	// cltvDelta is the final-hop CLTV delta; zero means "use default".
	cltvDelta uint16

	// routeHints are optional hints taken from the decoded invoice.
	routeHints [][]routing.HopHint

	// routes, when non-empty, are fully specified routes supplied by the
	// caller, used directly instead of path finding.
	routes []*routing.Route
}
|
|
|
|
|
|
|
|
// extractPaymentIntent attempts to parse the complete details required to
|
|
|
|
// dispatch a client from the information presented by an RPC client. There are
|
|
|
|
// three ways a client can specify their payment details: a payment request,
|
|
|
|
// via manual details, or via a complete route.
|
|
|
|
func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) {
|
|
|
|
var err error
|
|
|
|
payIntent := rpcPaymentIntent{}
|
|
|
|
|
|
|
|
// If a route was specified, then we can use that directly.
|
|
|
|
if len(rpcPayReq.routes) != 0 {
|
|
|
|
// If the user is using the REST interface, then they'll be
|
|
|
|
// passing the payment hash as a hex encoded string.
|
|
|
|
if rpcPayReq.PaymentHashString != "" {
|
|
|
|
paymentHash, err := hex.DecodeString(
|
|
|
|
rpcPayReq.PaymentHashString,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
|
|
|
}
|
|
|
|
|
|
|
|
copy(payIntent.rHash[:], paymentHash)
|
|
|
|
} else {
|
|
|
|
copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
|
|
|
|
}
|
|
|
|
|
|
|
|
payIntent.routes = rpcPayReq.routes
|
|
|
|
return payIntent, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the payment request field isn't blank, then the details of the
|
|
|
|
// invoice are encoded entirely within the encoded payReq. So we'll
|
|
|
|
// attempt to decode it, populating the payment accordingly.
|
|
|
|
if rpcPayReq.PaymentRequest != "" {
|
|
|
|
payReq, err := zpay32.Decode(
|
|
|
|
rpcPayReq.PaymentRequest, activeNetParams.Params,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll ensure that this payreq hasn't already expired.
|
|
|
|
err = validatePayReqExpiry(payReq)
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the amount was not included in the invoice, then we let
|
|
|
|
// the payee specify the amount of satoshis they wish to send.
|
|
|
|
// We override the amount to pay with the amount provided from
|
|
|
|
// the payment request.
|
|
|
|
if payReq.MilliSat == nil {
|
2018-06-12 02:25:34 +03:00
|
|
|
if rpcPayReq.Amt == 0 {
|
|
|
|
return payIntent, errors.New("amount must be " +
|
|
|
|
"specified when paying a zero amount " +
|
|
|
|
"invoice")
|
|
|
|
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
payIntent.msat = lnwire.NewMSatFromSatoshis(
|
|
|
|
btcutil.Amount(rpcPayReq.Amt),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
payIntent.msat = *payReq.MilliSat
|
|
|
|
}
|
|
|
|
|
2018-04-19 17:27:37 +03:00
|
|
|
// Calculate the fee limit that should be used for this payment.
|
|
|
|
payIntent.feeLimit = calculateFeeLimit(
|
|
|
|
rpcPayReq.FeeLimit, payIntent.msat,
|
|
|
|
)
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
copy(payIntent.rHash[:], payReq.PaymentHash[:])
|
|
|
|
payIntent.dest = payReq.Destination
|
|
|
|
payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry())
|
|
|
|
payIntent.routeHints = payReq.RouteHints
|
|
|
|
|
|
|
|
return payIntent, nil
|
2018-04-19 17:27:37 +03:00
|
|
|
}
|
2018-06-07 06:40:28 +03:00
|
|
|
|
2018-04-19 17:27:37 +03:00
|
|
|
// At this point, a destination MUST be specified, so we'll convert it
|
|
|
|
// into the proper representation now. The destination will either be
|
|
|
|
// encoded as raw bytes, or via a hex string.
|
|
|
|
if len(rpcPayReq.Dest) != 0 {
|
|
|
|
payIntent.dest, err = btcec.ParsePubKey(
|
|
|
|
rpcPayReq.Dest, btcec.S256(),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
2018-06-07 06:40:28 +03:00
|
|
|
}
|
|
|
|
|
2018-04-19 17:27:37 +03:00
|
|
|
} else {
|
|
|
|
pubBytes, err := hex.DecodeString(rpcPayReq.DestString)
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
|
|
|
}
|
|
|
|
payIntent.dest, err = btcec.ParsePubKey(pubBytes, btcec.S256())
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
|
|
|
}
|
|
|
|
}
|
2018-06-07 06:40:28 +03:00
|
|
|
|
2018-04-19 17:27:37 +03:00
|
|
|
// Otherwise, If the payment request field was not specified
|
|
|
|
// (and a custom route wasn't specified), construct the payment
|
|
|
|
// from the other fields.
|
|
|
|
payIntent.msat = lnwire.NewMSatFromSatoshis(
|
|
|
|
btcutil.Amount(rpcPayReq.Amt),
|
|
|
|
)
|
2018-06-07 06:40:28 +03:00
|
|
|
|
2018-04-19 17:27:37 +03:00
|
|
|
// Calculate the fee limit that should be used for this payment.
|
|
|
|
payIntent.feeLimit = calculateFeeLimit(
|
|
|
|
rpcPayReq.FeeLimit, payIntent.msat,
|
|
|
|
)
|
|
|
|
|
|
|
|
payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta)
|
|
|
|
|
2018-07-01 01:13:14 +03:00
|
|
|
// If the user is manually specifying payment details, then the payment
|
|
|
|
// hash may be encoded as a string.
|
|
|
|
switch {
|
|
|
|
case rpcPayReq.PaymentHashString != "":
|
2018-04-19 17:27:37 +03:00
|
|
|
paymentHash, err := hex.DecodeString(
|
|
|
|
rpcPayReq.PaymentHashString,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return payIntent, err
|
2018-06-07 06:40:28 +03:00
|
|
|
}
|
2018-04-19 17:27:37 +03:00
|
|
|
|
|
|
|
copy(payIntent.rHash[:], paymentHash)
|
2018-06-07 06:40:28 +03:00
|
|
|
|
|
|
|
// If we're in debug HTLC mode, then all outgoing HTLCs will pay to the
|
|
|
|
// same debug rHash. Otherwise, we pay to the rHash specified within
|
|
|
|
// the RPC request.
|
2018-07-01 01:13:14 +03:00
|
|
|
case cfg.DebugHTLC && bytes.Equal(payIntent.rHash[:], zeroHash[:]):
|
2018-06-07 06:40:28 +03:00
|
|
|
copy(payIntent.rHash[:], debugHash[:])
|
2018-07-01 01:13:14 +03:00
|
|
|
|
|
|
|
default:
|
|
|
|
copy(payIntent.rHash[:], rpcPayReq.PaymentHash)
|
2018-06-07 06:40:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Currently, within the bootstrap phase of the network, we limit the
|
|
|
|
// largest payment size allotted to (2^32) - 1 mSAT or 4.29 million
|
|
|
|
// satoshis.
|
|
|
|
if payIntent.msat > maxPaymentMSat {
|
|
|
|
// In this case, we'll send an error to the caller, but
|
|
|
|
// continue our loop for the next payment.
|
|
|
|
return payIntent, fmt.Errorf("payment of %v is too large, "+
|
|
|
|
"max payment allowed is %v", payIntent.msat,
|
|
|
|
maxPaymentMSat)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return payIntent, nil
|
|
|
|
}
|
|
|
|
|
// paymentIntentResponse carries the outcome of dispatching a single payment
// intent: the route used and the preimage on success, or the routing error on
// failure.
type paymentIntentResponse struct {
	// Route is the route the payment traversed.
	Route *routing.Route

	// Preimage is the preimage obtained by settling the payment.
	Preimage [32]byte

	// Err is the routing-level error, set when the payment failed.
	Err error
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
// dispatchPaymentIntent attempts to fully dispatch an RPC payment intent.
// We'll either pass the payment as a whole to the channel router, or give it a
// pre-built route. The first error this method returns denotes if we were
// unable to save the payment. The second error returned denotes if the payment
// didn't succeed.
//
// NOTE: on a routing failure the returned response carries the routing error
// in its Err field and the error return value is nil; on a persistence
// failure the response is nil and the error return value is non-nil. Callers
// must check both.
func (r *rpcServer) dispatchPaymentIntent(
	payIntent *rpcPaymentIntent) (*paymentIntentResponse, error) {

	// Construct a payment request to send to the channel router. If the
	// payment is successful, the route chosen will be returned. Otherwise,
	// we'll get a non-nil error.
	var (
		preImage  [32]byte
		route     *routing.Route
		routerErr error
	)

	// If a route was specified, then we'll pass the route directly to the
	// router, otherwise we'll create a payment session to execute it.
	if len(payIntent.routes) == 0 {
		payment := &routing.LightningPayment{
			Target:      payIntent.dest,
			Amount:      payIntent.msat,
			FeeLimit:    payIntent.feeLimit,
			PaymentHash: payIntent.rHash,
			RouteHints:  payIntent.routeHints,
		}

		// If the final CLTV value was specified, then we'll use that
		// rather than the default.
		if payIntent.cltvDelta != 0 {
			payment.FinalCLTVDelta = &payIntent.cltvDelta
		}

		preImage, route, routerErr = r.server.chanRouter.SendPayment(
			payment,
		)
	} else {
		// A pre-built route was supplied, so only the payment hash is
		// needed; the router will attempt the provided routes in
		// order.
		payment := &routing.LightningPayment{
			PaymentHash: payIntent.rHash,
		}

		preImage, route, routerErr = r.server.chanRouter.SendToRoute(
			payIntent.routes, payment,
		)
	}

	// If the route failed, then we'll return a nil save err, but a non-nil
	// routing err.
	if routerErr != nil {
		return &paymentIntentResponse{
			Err: routerErr,
		}, nil
	}

	// If a route was used to complete this payment, then we'll need to
	// compute the final amount sent
	var amt lnwire.MilliSatoshi
	if len(payIntent.routes) > 0 {
		amt = route.TotalAmount - route.TotalFees
	} else {
		amt = payIntent.msat
	}

	// Save the completed payment to the database for record keeping
	// purposes.
	err := r.savePayment(route, amt, preImage[:])
	if err != nil {
		// We weren't able to save the payment, so we return the save
		// err, but a nil routing err.
		return nil, err
	}

	return &paymentIntentResponse{
		Route:    route,
		Preimage: preImage,
	}, nil
}
|
|
|
|
|
|
|
|
// sendPayment takes a paymentStream (a source of pre-built routes or payment
// requests) and continually attempt to dispatch payment requests written to
// the write end of the stream. Responses will also be streamed back to the
// client via the write end of the stream. This method is by both SendToRoute
// and SendPayment as the logic is virtually identical.
//
// Concurrency model: one goroutine reads requests from the stream and feeds
// payChan; the main loop spawns one goroutine per payment (bounded by a
// semaphore) to dispatch it and stream the response back. Any fatal error is
// funneled through errChan, which terminates the stream.
func (r *rpcServer) sendPayment(stream *paymentStream) error {
	payChan := make(chan *rpcPaymentIntent)
	// Buffered so the first fatal error never blocks its sender.
	errChan := make(chan error, 1)

	// We don't allow payments to be sent while the daemon itself is still
	// syncing as we may be trying to sent a payment over a "stale"
	// channel.
	if !r.server.Started() {
		return fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// TODO(roasbeef): check payment filter to see if already used?

	// In order to limit the level of concurrency and prevent a client from
	// attempting to OOM the server, we'll set up a semaphore to create an
	// upper ceiling on the number of outstanding payments.
	const numOutstandingPayments = 2000
	htlcSema := make(chan struct{}, numOutstandingPayments)
	for i := 0; i < numOutstandingPayments; i++ {
		htlcSema <- struct{}{}
	}

	// Launch a new goroutine to handle reading new payment requests from
	// the client. This way we can handle errors independently of blocking
	// and waiting for the next payment request to come through.
	//
	// reqQuit is closed on return so the reader goroutine never leaks.
	reqQuit := make(chan struct{})
	defer func() {
		close(reqQuit)
	}()
	go func() {
		for {
			select {
			case <-reqQuit:
				return
			case <-r.quit:
				errChan <- nil
				return
			default:
				// Receive the next pending payment within the
				// stream sent by the client. If we read the
				// EOF sentinel, then the client has closed the
				// stream, and we can exit normally.
				nextPayment, err := stream.recv()
				if err == io.EOF {
					errChan <- nil
					return
				} else if err != nil {
					// Select against reqQuit so we don't
					// block forever if the main loop has
					// already exited.
					select {
					case errChan <- err:
					case <-reqQuit:
						return
					}
					return
				}

				// Populate the next payment, either from the
				// payment request, or from the explicitly set
				// fields. If the payment proto wasn't well
				// formed, then we'll send an error reply and
				// wait for the next payment.
				payIntent, err := extractPaymentIntent(nextPayment)
				if err != nil {
					if err := stream.send(&lnrpc.SendResponse{
						PaymentError: err.Error(),
					}); err != nil {
						select {
						case errChan <- err:
						case <-reqQuit:
							return
						}
					}
					continue
				}

				// If the payment was well formed, then we'll
				// send to the dispatch goroutine, or exit,
				// which ever comes first
				select {
				case payChan <- &payIntent:
				case <-reqQuit:
					return
				}
			}
		}
	}()

	for {
		select {
		case err := <-errChan:
			return err

		case payIntent := <-payChan:
			// We launch a new goroutine to execute the current
			// payment so we can continue to serve requests while
			// this payment is being dispatched.
			go func() {
				// Attempt to grab a free semaphore slot, using
				// a defer to eventually release the slot
				// regardless of payment success.
				<-htlcSema
				defer func() {
					htlcSema <- struct{}{}
				}()

				resp, saveErr := r.dispatchPaymentIntent(
					payIntent,
				)

				switch {
				// If we were unable to save the state of the
				// payment, then we'll return the error to the
				// user, and terminate.
				case saveErr != nil:
					errChan <- saveErr
					return

				// If we receive payment error than, instead of
				// terminating the stream, send error response
				// to the user.
				case resp.Err != nil:
					err := stream.send(&lnrpc.SendResponse{
						PaymentError: resp.Err.Error(),
					})
					if err != nil {
						errChan <- err
					}
					return
				}

				marshalledRouted := marshallRoute(resp.Route)
				err := stream.send(&lnrpc.SendResponse{
					PaymentPreimage: resp.Preimage[:],
					PaymentRoute:    marshalledRouted,
				})
				if err != nil {
					errChan <- err
					return
				}
			}()
		}
	}
}
|
2016-07-15 14:02:59 +03:00
|
|
|
|
2016-11-11 04:37:21 +03:00
|
|
|
// SendPaymentSync is the synchronous non-streaming version of SendPayment.
|
|
|
|
// This RPC is intended to be consumed by clients of the REST proxy.
|
|
|
|
// Additionally, this RPC expects the destination's public key and the payment
|
|
|
|
// hash (if any) to be encoded as hex strings.
|
|
|
|
func (r *rpcServer) SendPaymentSync(ctx context.Context,
|
|
|
|
nextPayment *lnrpc.SendRequest) (*lnrpc.SendResponse, error) {
|
2017-08-22 10:28:40 +03:00
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
return r.sendPaymentSync(ctx, &rpcPaymentRequest{
|
2018-05-01 11:17:55 +03:00
|
|
|
SendRequest: nextPayment,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// SendToRouteSync is the synchronous non-streaming version of SendToRoute.
|
|
|
|
// This RPC is intended to be consumed by clients of the REST proxy.
|
2018-06-07 06:40:28 +03:00
|
|
|
// Additionally, this RPC expects the payment hash (if any) to be encoded as
|
|
|
|
// hex strings.
|
2018-05-01 11:17:55 +03:00
|
|
|
func (r *rpcServer) SendToRouteSync(ctx context.Context,
|
|
|
|
req *lnrpc.SendToRouteRequest) (*lnrpc.SendResponse, error) {
|
|
|
|
|
|
|
|
if len(req.Routes) == 0 {
|
|
|
|
return nil, fmt.Errorf("unable to send, no routes provided")
|
|
|
|
}
|
|
|
|
|
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
|
|
|
|
|
|
|
routes := make([]*routing.Route, len(req.Routes))
|
|
|
|
for i, route := range req.Routes {
|
|
|
|
route, err := unmarshallRoute(route, graph)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
routes[i] = route
|
|
|
|
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
return r.sendPaymentSync(ctx, &rpcPaymentRequest{
|
2018-05-01 11:17:55 +03:00
|
|
|
SendRequest: &lnrpc.SendRequest{
|
|
|
|
PaymentHashString: req.PaymentHashString,
|
|
|
|
},
|
|
|
|
routes: routes,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-06-07 06:40:28 +03:00
|
|
|
// sendPaymentSync is the synchronous variant of sendPayment. It will block and
// wait until the payment has been fully completed.
func (r *rpcServer) sendPaymentSync(ctx context.Context,
	nextPayment *rpcPaymentRequest) (*lnrpc.SendResponse, error) {

	// We don't allow payments to be sent while the daemon itself is still
	// syncing as we may be trying to sent a payment over a "stale"
	// channel.
	if !r.server.Started() {
		return nil, fmt.Errorf("chain backend is still syncing, server " +
			"not active yet")
	}

	// First we'll attempt to map the proto describing the next payment to
	// an intent that we can pass to local sub-systems.
	payIntent, err := extractPaymentIntent(nextPayment)
	if err != nil {
		return nil, err
	}

	// With the payment validated, we'll now attempt to dispatch the
	// payment.
	resp, saveErr := r.dispatchPaymentIntent(&payIntent)
	switch {
	// A persistence failure is terminal for the RPC call.
	case saveErr != nil:
		return nil, saveErr

	// A payment-level (routing) error is reported to the caller in-band
	// via the PaymentError field rather than as an RPC error.
	case resp.Err != nil:
		return &lnrpc.SendResponse{
			PaymentError: resp.Err.Error(),
		}, nil
	}

	return &lnrpc.SendResponse{
		PaymentPreimage: resp.Preimage[:],
		PaymentRoute:    marshallRoute(resp.Route),
	}, nil
}
|
|
|
|
|
2016-09-19 22:04:56 +03:00
|
|
|
// AddInvoice attempts to add a new invoice to the invoice database. Any
|
|
|
|
// duplicated invoices are rejected, therefore all invoices *must* have a
|
|
|
|
// unique payment preimage.
|
|
|
|
func (r *rpcServer) AddInvoice(ctx context.Context,
|
|
|
|
invoice *lnrpc.Invoice) (*lnrpc.AddInvoiceResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2016-11-11 04:37:21 +03:00
|
|
|
var paymentPreimage [32]byte
|
|
|
|
|
|
|
|
switch {
|
|
|
|
// If a preimage wasn't specified, then we'll generate a new preimage
|
|
|
|
// from fresh cryptographic randomness.
|
|
|
|
case len(invoice.RPreimage) == 0:
|
|
|
|
if _, err := rand.Read(paymentPreimage[:]); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, if a preimage was specified, then it MUST be exactly
|
|
|
|
// 32-bytes.
|
|
|
|
case len(invoice.RPreimage) > 0 && len(invoice.RPreimage) != 32:
|
2016-09-19 22:04:56 +03:00
|
|
|
return nil, fmt.Errorf("payment preimage must be exactly "+
|
2016-11-11 04:37:21 +03:00
|
|
|
"32 bytes, is instead %v", len(invoice.RPreimage))
|
|
|
|
|
|
|
|
// If the preimage meets the size specifications, then it can be used
|
|
|
|
// as is.
|
|
|
|
default:
|
|
|
|
copy(paymentPreimage[:], invoice.RPreimage[:])
|
2016-09-19 22:04:56 +03:00
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// The size of the memo, receipt and description hash attached must not
|
|
|
|
// exceed the maximum values for either of the fields.
|
2016-09-19 22:04:56 +03:00
|
|
|
if len(invoice.Memo) > channeldb.MaxMemoSize {
|
|
|
|
return nil, fmt.Errorf("memo too large: %v bytes "+
|
|
|
|
"(maxsize=%v)", len(invoice.Memo), channeldb.MaxMemoSize)
|
|
|
|
}
|
|
|
|
if len(invoice.Receipt) > channeldb.MaxReceiptSize {
|
|
|
|
return nil, fmt.Errorf("receipt too large: %v bytes "+
|
|
|
|
"(maxsize=%v)", len(invoice.Receipt), channeldb.MaxReceiptSize)
|
|
|
|
}
|
2017-09-05 19:08:02 +03:00
|
|
|
if len(invoice.DescriptionHash) > 0 && len(invoice.DescriptionHash) != 32 {
|
|
|
|
return nil, fmt.Errorf("description hash is %v bytes, must be %v",
|
|
|
|
len(invoice.DescriptionHash), channeldb.MaxPaymentRequestSize)
|
|
|
|
}
|
2016-09-19 22:04:56 +03:00
|
|
|
|
2018-07-26 06:11:46 +03:00
|
|
|
// The value of the invoice must not be negative.
|
|
|
|
if invoice.Value < 0 {
|
|
|
|
return nil, fmt.Errorf("payments of negative value "+
|
|
|
|
"are not allowed, value is %v", invoice.Value)
|
|
|
|
}
|
2018-07-31 10:17:17 +03:00
|
|
|
|
2017-08-22 09:25:41 +03:00
|
|
|
amt := btcutil.Amount(invoice.Value)
|
|
|
|
amtMSat := lnwire.NewMSatFromSatoshis(amt)
|
2017-08-22 10:28:40 +03:00
|
|
|
|
|
|
|
// The value of the invoice must also not exceed the current soft-limit
|
|
|
|
// on the largest payment within the network.
|
2018-01-22 23:01:43 +03:00
|
|
|
if amtMSat > maxPaymentMSat {
|
2017-08-22 10:28:40 +03:00
|
|
|
return nil, fmt.Errorf("payment of %v is too large, max "+
|
|
|
|
"payment allowed is %v", amt, maxPaymentMSat.ToSatoshis())
|
2017-01-03 02:35:22 +03:00
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// Next, generate the payment hash itself from the preimage. This will
|
|
|
|
// be used by clients to query for the state of a particular invoice.
|
|
|
|
rHash := sha256.Sum256(paymentPreimage[:])
|
|
|
|
|
|
|
|
// We also create an encoded payment request which allows the
|
|
|
|
// caller to compactly send the invoice to the payer. We'll create a
|
|
|
|
// list of options to be added to the encoded payment request. For now
|
|
|
|
// we only support the required fields description/description_hash,
|
|
|
|
// expiry, fallback address, and the amount field.
|
|
|
|
var options []func(*zpay32.Invoice)
|
|
|
|
|
2018-01-22 23:01:43 +03:00
|
|
|
// We only include the amount in the invoice if it is greater than 0.
|
|
|
|
// By not including the amount, we enable the creation of invoices that
|
|
|
|
// allow the payee to specify the amount of satoshis they wish to send.
|
|
|
|
if amtMSat > 0 {
|
|
|
|
options = append(options, zpay32.Amount(amtMSat))
|
|
|
|
}
|
2017-09-05 19:08:02 +03:00
|
|
|
|
|
|
|
// If specified, add a fallback address to the payment request.
|
|
|
|
if len(invoice.FallbackAddr) > 0 {
|
|
|
|
addr, err := btcutil.DecodeAddress(invoice.FallbackAddr,
|
|
|
|
activeNetParams.Params)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("invalid fallback address: %v",
|
|
|
|
err)
|
|
|
|
}
|
|
|
|
options = append(options, zpay32.FallbackAddr(addr))
|
|
|
|
}
|
|
|
|
|
|
|
|
// If expiry is set, specify it. If it is not provided, no expiry time
|
|
|
|
// will be explicitly added to this payment request, which will imply
|
|
|
|
// the default 3600 seconds.
|
|
|
|
if invoice.Expiry > 0 {
|
2018-03-30 02:25:03 +03:00
|
|
|
|
|
|
|
// We'll ensure that the specified expiry is restricted to sane
|
|
|
|
// number of seconds. As a result, we'll reject an invoice with
|
|
|
|
// an expiry greater than 1 year.
|
|
|
|
maxExpiry := time.Hour * 24 * 365
|
|
|
|
expSeconds := invoice.Expiry
|
|
|
|
|
|
|
|
if float64(expSeconds) > maxExpiry.Seconds() {
|
|
|
|
return nil, fmt.Errorf("expiry of %v seconds "+
|
|
|
|
"greater than max expiry of %v seconds",
|
|
|
|
float64(expSeconds), maxExpiry.Seconds())
|
|
|
|
}
|
|
|
|
|
|
|
|
expiry := time.Duration(invoice.Expiry) * time.Second
|
|
|
|
options = append(options, zpay32.Expiry(expiry))
|
2017-09-05 19:08:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// If the description hash is set, then we add it do the list of options.
|
|
|
|
// If not, use the memo field as the payment request description.
|
|
|
|
if len(invoice.DescriptionHash) > 0 {
|
|
|
|
var descHash [32]byte
|
|
|
|
copy(descHash[:], invoice.DescriptionHash[:])
|
|
|
|
options = append(options, zpay32.DescriptionHash(descHash))
|
|
|
|
} else {
|
|
|
|
// Use the memo field as the description. If this is not set
|
|
|
|
// this will just be an empty string.
|
|
|
|
options = append(options, zpay32.Description(invoice.Memo))
|
|
|
|
}
|
|
|
|
|
2017-10-19 08:15:31 +03:00
|
|
|
// We'll use our current default CLTV value unless one was specified as
|
|
|
|
// an option on the command line when creating an invoice.
|
|
|
|
switch {
|
|
|
|
case invoice.CltvExpiry > math.MaxUint16:
|
|
|
|
return nil, fmt.Errorf("CLTV delta of %v is too large, max "+
|
|
|
|
"accepted is: %v", invoice.CltvExpiry, math.MaxUint16)
|
|
|
|
case invoice.CltvExpiry != 0:
|
|
|
|
options = append(options,
|
|
|
|
zpay32.CLTVExpiry(invoice.CltvExpiry))
|
|
|
|
default:
|
|
|
|
// TODO(roasbeef): assumes set delta between versions
|
2017-12-17 16:33:09 +03:00
|
|
|
defaultDelta := cfg.Bitcoin.TimeLockDelta
|
2018-03-14 07:15:53 +03:00
|
|
|
if registeredChains.PrimaryChain() == litecoinChain {
|
|
|
|
defaultDelta = cfg.Litecoin.TimeLockDelta
|
|
|
|
}
|
2017-10-19 08:15:31 +03:00
|
|
|
options = append(options, zpay32.CLTVExpiry(uint64(defaultDelta)))
|
|
|
|
}
|
|
|
|
|
2018-03-28 07:51:04 +03:00
|
|
|
// If we were requested to include routing hints in the invoice, then
|
|
|
|
// we'll fetch all of our available private channels and create routing
|
|
|
|
// hints for them.
|
|
|
|
if invoice.Private {
|
|
|
|
openChannels, err := r.server.chanDB.FetchAllChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("could not fetch all channels")
|
|
|
|
}
|
|
|
|
|
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
|
|
|
|
|
|
|
numHints := 0
|
|
|
|
for _, channel := range openChannels {
|
|
|
|
// We'll restrict the number of individual route hints
|
|
|
|
// to 20 to avoid creating overly large invoices.
|
|
|
|
if numHints > 20 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Since we're only interested in our private channels,
|
|
|
|
// we'll skip public ones.
|
|
|
|
isPublic := channel.ChannelFlags&lnwire.FFAnnounceChannel != 0
|
|
|
|
if isPublic {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure the counterparty has enough balance in the
|
|
|
|
// channel for our amount. We do this in order to reduce
|
|
|
|
// payment errors when attempting to use this channel
|
|
|
|
// as a hint.
|
|
|
|
chanPoint := lnwire.NewChanIDFromOutPoint(
|
|
|
|
&channel.FundingOutpoint,
|
|
|
|
)
|
|
|
|
if amtMSat >= channel.LocalCommitment.RemoteBalance {
|
|
|
|
rpcsLog.Debugf("Skipping channel %v due to "+
|
|
|
|
"not having enough remote balance",
|
|
|
|
chanPoint)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure the channel is active.
|
|
|
|
link, err := r.server.htlcSwitch.GetLink(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Unable to get link for "+
|
|
|
|
"channel %v: %v", chanPoint, err)
|
2018-04-21 18:31:50 +03:00
|
|
|
continue
|
2018-03-28 07:51:04 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if !link.EligibleToForward() {
|
|
|
|
rpcsLog.Debugf("Skipping link %v due to not "+
|
|
|
|
"being eligible to forward payments",
|
|
|
|
chanPoint)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Fetch the policies for each end of the channel.
|
2018-05-02 02:27:20 +03:00
|
|
|
chanID := channel.ShortChanID().ToUint64()
|
2018-06-05 02:40:37 +03:00
|
|
|
info, p1, p2, err := graph.FetchChannelEdgesByID(chanID)
|
2018-03-28 07:51:04 +03:00
|
|
|
if err != nil {
|
|
|
|
rpcsLog.Errorf("Unable to fetch the routing "+
|
|
|
|
"policies for the edges of the channel "+
|
|
|
|
"%v: %v", chanPoint, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now, we'll need to determine which is the correct
|
|
|
|
// policy for HTLCs being sent from the remote node.
|
|
|
|
var remotePolicy *channeldb.ChannelEdgePolicy
|
|
|
|
remotePub := channel.IdentityPub.SerializeCompressed()
|
2018-06-05 02:40:37 +03:00
|
|
|
if bytes.Equal(remotePub, info.NodeKey1Bytes[:]) {
|
2018-03-28 07:51:04 +03:00
|
|
|
remotePolicy = p1
|
|
|
|
} else {
|
|
|
|
remotePolicy = p2
|
|
|
|
}
|
|
|
|
|
2018-06-05 02:40:37 +03:00
|
|
|
// If for some reason we don't yet have the edge for
|
|
|
|
// the remote party, then we'll just skip adding this
|
|
|
|
// channel as a routing hint.
|
|
|
|
if remotePolicy == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-03-28 07:51:04 +03:00
|
|
|
// Finally, create the routing hint for this channel and
|
|
|
|
// add it to our list of route hints.
|
|
|
|
hint := routing.HopHint{
|
|
|
|
NodeID: channel.IdentityPub,
|
|
|
|
ChannelID: chanID,
|
|
|
|
FeeBaseMSat: uint32(remotePolicy.FeeBaseMSat),
|
|
|
|
FeeProportionalMillionths: uint32(
|
|
|
|
remotePolicy.FeeProportionalMillionths,
|
|
|
|
),
|
|
|
|
CLTVExpiryDelta: remotePolicy.TimeLockDelta,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Include the route hint in our set of options that
|
|
|
|
// will be used when creating the invoice.
|
|
|
|
routeHint := []routing.HopHint{hint}
|
|
|
|
options = append(options, zpay32.RouteHint(routeHint))
|
|
|
|
|
|
|
|
numHints++
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// Create and encode the payment request as a bech32 (zpay32) string.
|
|
|
|
creationDate := time.Now()
|
|
|
|
payReq, err := zpay32.NewInvoice(
|
2018-03-28 07:51:04 +03:00
|
|
|
activeNetParams.Params, rHash, creationDate, options...,
|
2017-09-05 19:08:02 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
payReqString, err := payReq.Encode(
|
|
|
|
zpay32.MessageSigner{
|
|
|
|
SignCompact: r.server.nodeSigner.SignDigestCompact,
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-06-30 04:06:24 +03:00
|
|
|
newInvoice := &channeldb.Invoice{
|
2017-09-05 19:08:02 +03:00
|
|
|
CreationDate: creationDate,
|
|
|
|
Memo: []byte(invoice.Memo),
|
|
|
|
Receipt: invoice.Receipt,
|
|
|
|
PaymentRequest: []byte(payReqString),
|
2016-09-19 22:04:56 +03:00
|
|
|
Terms: channeldb.ContractTerm{
|
2018-06-29 22:40:53 +03:00
|
|
|
Value: amtMSat,
|
2016-09-19 22:04:56 +03:00
|
|
|
},
|
|
|
|
}
|
2018-06-30 04:06:24 +03:00
|
|
|
copy(newInvoice.Terms.PaymentPreimage[:], paymentPreimage[:])
|
2016-09-19 22:04:56 +03:00
|
|
|
|
2016-09-26 20:29:18 +03:00
|
|
|
rpcsLog.Tracef("[addinvoice] adding new invoice %v",
|
|
|
|
newLogClosure(func() string {
|
2018-06-30 04:06:24 +03:00
|
|
|
return spew.Sdump(newInvoice)
|
2017-10-19 08:15:31 +03:00
|
|
|
}),
|
|
|
|
)
|
2016-09-26 20:29:18 +03:00
|
|
|
|
2016-11-11 04:37:21 +03:00
|
|
|
// With all sanity checks passed, write the invoice to the database.
|
2018-06-30 04:06:24 +03:00
|
|
|
addIndex, err := r.server.invoices.AddInvoice(newInvoice)
|
|
|
|
if err != nil {
|
2016-09-19 22:04:56 +03:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &lnrpc.AddInvoiceResponse{
|
2017-01-03 02:34:49 +03:00
|
|
|
RHash: rHash[:],
|
|
|
|
PaymentRequest: payReqString,
|
2018-06-30 04:06:24 +03:00
|
|
|
AddIndex: addIndex,
|
2016-09-19 22:04:56 +03:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// createRPCInvoice creates an *lnrpc.Invoice from the *channeldb.Invoice.
// The invoice's stored payment request is decoded to recover fields (payment
// hash, description hash, fallback address, expiry, CLTV, route hints) that
// aren't persisted directly on the database record.
func createRPCInvoice(invoice *channeldb.Invoice) (*lnrpc.Invoice, error) {
	paymentRequest := string(invoice.PaymentRequest)
	decoded, err := zpay32.Decode(paymentRequest, activeNetParams.Params)
	if err != nil {
		return nil, fmt.Errorf("unable to decode payment request: %v",
			err)
	}

	// The description hash is optional, so only populate it when present
	// in the decoded payment request.
	descHash := []byte("")
	if decoded.DescriptionHash != nil {
		descHash = decoded.DescriptionHash[:]
	}

	// Likewise, the fallback on-chain address is optional.
	fallbackAddr := ""
	if decoded.FallbackAddr != nil {
		fallbackAddr = decoded.FallbackAddr.String()
	}

	// A zero settle date indicates the invoice hasn't been settled.
	settleDate := int64(0)
	if !invoice.SettleDate.IsZero() {
		settleDate = invoice.SettleDate.Unix()
	}

	// Expiry time will default to 3600 seconds if not specified
	// explicitly.
	expiry := int64(decoded.Expiry().Seconds())

	// The expiry will default to 9 blocks if not specified explicitly.
	cltvExpiry := decoded.MinFinalCLTVExpiry()

	// Convert between the `lnrpc` and `routing` types.
	routeHints := createRPCRouteHints(decoded.RouteHints)

	preimage := invoice.Terms.PaymentPreimage
	satAmt := invoice.Terms.Value.ToSatoshis()
	satAmtPaid := invoice.AmtPaid.ToSatoshis()

	return &lnrpc.Invoice{
		Memo:            string(invoice.Memo[:]),
		Receipt:         invoice.Receipt[:],
		RHash:           decoded.PaymentHash[:],
		RPreimage:       preimage[:],
		Value:           int64(satAmt),
		CreationDate:    invoice.CreationDate.Unix(),
		SettleDate:      settleDate,
		Settled:         invoice.Terms.Settled,
		PaymentRequest:  paymentRequest,
		DescriptionHash: descHash,
		Expiry:          expiry,
		CltvExpiry:      cltvExpiry,
		FallbackAddr:    fallbackAddr,
		RouteHints:      routeHints,
		AddIndex:        invoice.AddIndex,
		SettleIndex:     invoice.SettleIndex,
		AmtPaidSat:      int64(satAmtPaid),
		AmtPaidMsat:     int64(invoice.AmtPaid),
		AmtPaid:         int64(invoice.AmtPaid),
	}, nil
}
|
|
|
|
|
2018-03-28 07:01:21 +03:00
|
|
|
// createRPCRouteHints takes in the decoded form of an invoice's route hints
|
|
|
|
// and converts them into the lnrpc type.
|
|
|
|
func createRPCRouteHints(routeHints [][]routing.HopHint) []*lnrpc.RouteHint {
|
|
|
|
var res []*lnrpc.RouteHint
|
|
|
|
|
|
|
|
for _, route := range routeHints {
|
|
|
|
hopHints := make([]*lnrpc.HopHint, 0, len(route))
|
|
|
|
for _, hop := range route {
|
|
|
|
pubKey := hex.EncodeToString(
|
|
|
|
hop.NodeID.SerializeCompressed(),
|
|
|
|
)
|
|
|
|
|
|
|
|
hint := &lnrpc.HopHint{
|
|
|
|
NodeId: pubKey,
|
|
|
|
ChanId: hop.ChannelID,
|
|
|
|
FeeBaseMsat: hop.FeeBaseMSat,
|
|
|
|
FeeProportionalMillionths: hop.FeeProportionalMillionths,
|
|
|
|
CltvExpiryDelta: uint32(hop.CLTVExpiryDelta),
|
|
|
|
}
|
|
|
|
|
|
|
|
hopHints = append(hopHints, hint)
|
|
|
|
}
|
|
|
|
|
|
|
|
routeHint := &lnrpc.RouteHint{HopHints: hopHints}
|
|
|
|
res = append(res, routeHint)
|
|
|
|
}
|
|
|
|
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// LookupInvoice attempts to look up an invoice according to its payment hash.
// The passed payment hash *must* be exactly 32 bytes, if not an error is
// returned.
func (r *rpcServer) LookupInvoice(ctx context.Context,
	req *lnrpc.PaymentHash) (*lnrpc.Invoice, error) {

	var (
		payHash [32]byte
		rHash   []byte
		err     error
	)

	// If the RHash as a raw string was provided, then decode that and use
	// that directly. Otherwise, we use the raw bytes provided.
	if req.RHashStr != "" {
		rHash, err = hex.DecodeString(req.RHashStr)
		if err != nil {
			return nil, err
		}
	} else {
		rHash = req.RHash
	}

	// Ensure that the payment hash is *exactly* 32-bytes.
	if len(rHash) != 0 && len(rHash) != 32 {
		return nil, fmt.Errorf("payment hash must be exactly "+
			"32 bytes, is instead %v", len(rHash))
	}
	copy(payHash[:], rHash)

	rpcsLog.Tracef("[lookupinvoice] searching for invoice %x", payHash[:])

	invoice, _, err := r.server.invoices.LookupInvoice(payHash)
	if err != nil {
		return nil, err
	}

	rpcsLog.Tracef("[lookupinvoice] located invoice %v",
		newLogClosure(func() string {
			return spew.Sdump(invoice)
		}))

	// Convert the database representation into its RPC form before
	// returning it to the caller.
	rpcInvoice, err := createRPCInvoice(&invoice)
	if err != nil {
		return nil, err
	}

	return rpcInvoice, nil
}
|
|
|
|
|
|
|
|
// ListInvoices returns a list of all the invoices currently stored within the
// database. Any active debug invoices are ignored.
func (r *rpcServer) ListInvoices(ctx context.Context,
	req *lnrpc.ListInvoiceRequest) (*lnrpc.ListInvoiceResponse, error) {

	// If the number of invoices was not specified, then we'll default to
	// returning the latest 100 invoices.
	if req.NumMaxInvoices == 0 {
		req.NumMaxInvoices = 100
	}

	// Next, we'll map the proto request into a format that is understood by
	// the database.
	q := channeldb.InvoiceQuery{
		IndexOffset:    req.IndexOffset,
		NumMaxInvoices: req.NumMaxInvoices,
		PendingOnly:    req.PendingOnly,
	}
	invoiceSlice, err := r.server.chanDB.QueryInvoices(q)
	if err != nil {
		return nil, fmt.Errorf("unable to query invoices: %v", err)
	}

	// Before returning the response, we'll need to convert each invoice
	// into it's proto representation.
	resp := &lnrpc.ListInvoiceResponse{
		Invoices:        make([]*lnrpc.Invoice, len(invoiceSlice.Invoices)),
		LastIndexOffset: invoiceSlice.LastIndexOffset,
	}
	for i, invoice := range invoiceSlice.Invoices {
		resp.Invoices[i], err = createRPCInvoice(invoice)
		if err != nil {
			return nil, err
		}
	}

	return resp, nil
}
|
|
|
|
|
2017-12-30 17:44:31 +03:00
|
|
|
// SubscribeInvoices returns a uni-directional stream (server -> client) for
|
2016-10-16 00:41:11 +03:00
|
|
|
// notifying the client of newly added/settled invoices.
|
|
|
|
func (r *rpcServer) SubscribeInvoices(req *lnrpc.InvoiceSubscription,
|
|
|
|
updateStream lnrpc.Lightning_SubscribeInvoicesServer) error {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2018-04-25 07:08:19 +03:00
|
|
|
invoiceClient := r.server.invoices.SubscribeNotifications(
|
|
|
|
req.AddIndex, req.SettleIndex,
|
|
|
|
)
|
2016-10-16 00:41:11 +03:00
|
|
|
defer invoiceClient.Cancel()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
2018-04-25 07:08:19 +03:00
|
|
|
case newInvoice := <-invoiceClient.NewInvoices:
|
|
|
|
rpcInvoice, err := createRPCInvoice(newInvoice)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-09-05 19:08:02 +03:00
|
|
|
|
2018-04-25 07:08:19 +03:00
|
|
|
if err := updateStream.Send(rpcInvoice); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
case settledInvoice := <-invoiceClient.SettledInvoices:
|
2017-09-05 19:08:02 +03:00
|
|
|
rpcInvoice, err := createRPCInvoice(settledInvoice)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2016-10-16 00:41:11 +03:00
|
|
|
}
|
2017-09-05 19:08:02 +03:00
|
|
|
|
|
|
|
if err := updateStream.Send(rpcInvoice); err != nil {
|
2016-10-16 00:41:11 +03:00
|
|
|
return err
|
|
|
|
}
|
2018-04-25 07:03:05 +03:00
|
|
|
|
2016-10-16 00:41:11 +03:00
|
|
|
case <-r.quit:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SubscribeTransactions creates a uni-directional stream (server -> client) in
|
|
|
|
// which any newly discovered transactions relevant to the wallet are sent
|
|
|
|
// over.
|
|
|
|
func (r *rpcServer) SubscribeTransactions(req *lnrpc.GetTransactionsRequest,
|
|
|
|
updateStream lnrpc.Lightning_SubscribeTransactionsServer) error {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
txClient, err := r.server.cc.wallet.SubscribeTransactions()
|
2016-10-16 00:41:11 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer txClient.Cancel()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case tx := <-txClient.ConfirmedTransactions():
|
|
|
|
detail := &lnrpc.Transaction{
|
|
|
|
TxHash: tx.Hash.String(),
|
2017-03-06 01:53:37 +03:00
|
|
|
Amount: int64(tx.Value),
|
2016-10-16 00:41:11 +03:00
|
|
|
NumConfirmations: tx.NumConfirmations,
|
|
|
|
BlockHash: tx.BlockHash.String(),
|
|
|
|
TimeStamp: tx.Timestamp,
|
|
|
|
TotalFees: tx.TotalFees,
|
|
|
|
}
|
|
|
|
if err := updateStream.Send(detail); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-04-25 07:03:05 +03:00
|
|
|
|
2016-10-16 00:41:11 +03:00
|
|
|
case tx := <-txClient.UnconfirmedTransactions():
|
|
|
|
detail := &lnrpc.Transaction{
|
|
|
|
TxHash: tx.Hash.String(),
|
2017-03-06 01:53:37 +03:00
|
|
|
Amount: int64(tx.Value),
|
2016-10-16 00:41:11 +03:00
|
|
|
TimeStamp: tx.Timestamp,
|
|
|
|
TotalFees: tx.TotalFees,
|
|
|
|
}
|
|
|
|
if err := updateStream.Send(detail); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-04-25 07:03:05 +03:00
|
|
|
|
2016-10-16 00:41:11 +03:00
|
|
|
case <-r.quit:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetTransactions returns a list of describing all the known transactions
|
|
|
|
// relevant to the wallet.
|
2017-08-18 04:50:57 +03:00
|
|
|
func (r *rpcServer) GetTransactions(ctx context.Context,
|
|
|
|
_ *lnrpc.GetTransactionsRequest) (*lnrpc.TransactionDetails, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-11-17 03:00:58 +03:00
|
|
|
// TODO(roasbeef): add pagination support
|
2017-05-18 21:55:25 +03:00
|
|
|
transactions, err := r.server.cc.wallet.ListTransactionDetails()
|
2016-10-16 00:41:11 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
txDetails := &lnrpc.TransactionDetails{
|
|
|
|
Transactions: make([]*lnrpc.Transaction, len(transactions)),
|
|
|
|
}
|
|
|
|
for i, tx := range transactions {
|
2017-12-06 20:19:37 +03:00
|
|
|
var destAddresses []string
|
|
|
|
for _, destAddress := range tx.DestAddresses {
|
|
|
|
destAddresses = append(destAddresses, destAddress.EncodeAddress())
|
|
|
|
}
|
|
|
|
|
2016-10-16 00:41:11 +03:00
|
|
|
txDetails.Transactions[i] = &lnrpc.Transaction{
|
|
|
|
TxHash: tx.Hash.String(),
|
2017-03-06 01:53:37 +03:00
|
|
|
Amount: int64(tx.Value),
|
2016-10-16 00:41:11 +03:00
|
|
|
NumConfirmations: tx.NumConfirmations,
|
|
|
|
BlockHash: tx.BlockHash.String(),
|
2017-03-06 00:39:42 +03:00
|
|
|
BlockHeight: tx.BlockHeight,
|
2016-10-16 00:41:11 +03:00
|
|
|
TimeStamp: tx.Timestamp,
|
|
|
|
TotalFees: tx.TotalFees,
|
2017-12-06 20:19:37 +03:00
|
|
|
DestAddresses: destAddresses,
|
2016-10-16 00:41:11 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return txDetails, nil
|
|
|
|
}
|
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
// DescribeGraph returns a description of the latest graph state from the PoV
// of the node. The graph information is partitioned into two components: all
// the nodes/vertexes, and all the edges that connect the vertexes themselves.
// As this is a directed graph, the edges also contain the node directional
// specific routing policy which includes: the time lock delta, fee
// information, etc.
func (r *rpcServer) DescribeGraph(ctx context.Context,
	_ *lnrpc.ChannelGraphRequest) (*lnrpc.ChannelGraph, error) {

	resp := &lnrpc.ChannelGraph{}

	// Obtain the pointer to the global singleton channel graph, this will
	// provide a consistent view of the graph due to bolt db's
	// transactional model.
	graph := r.server.chanDB.ChannelGraph()

	// First iterate through all the known nodes (connected or unconnected
	// within the graph), collating their current state into the RPC
	// response. A nil tx is passed so the graph opens its own read
	// transaction for the traversal.
	err := graph.ForEachNode(nil, func(_ *bolt.Tx, node *channeldb.LightningNode) error {
		// Convert each advertised network address into its RPC form.
		nodeAddrs := make([]*lnrpc.NodeAddress, 0)
		for _, addr := range node.Addresses {
			nodeAddr := &lnrpc.NodeAddress{
				Network: addr.Network(),
				Addr:    addr.String(),
			}
			nodeAddrs = append(nodeAddrs, nodeAddr)
		}

		// Render the node's color as an HTML-style "#rrggbb" string.
		nodeColor := fmt.Sprintf("#%02x%02x%02x", node.Color.R, node.Color.G, node.Color.B)
		resp.Nodes = append(resp.Nodes, &lnrpc.LightningNode{
			LastUpdate: uint32(node.LastUpdate.Unix()),
			PubKey:     hex.EncodeToString(node.PubKeyBytes[:]),
			Addresses:  nodeAddrs,
			Alias:      node.Alias,
			Color:      nodeColor,
		})

		return nil
	})
	if err != nil {
		return nil, err
	}

	// Next, for each active channel we know of within the graph, create a
	// similar response which details both the edge information as well as
	// the routing policies of the nodes connecting the two edges.
	err = graph.ForEachChannel(func(edgeInfo *channeldb.ChannelEdgeInfo,
		c1, c2 *channeldb.ChannelEdgePolicy) error {

		edge := marshalDbEdge(edgeInfo, c1, c2)
		resp.Edges = append(resp.Edges, edge)
		return nil
	})
	// A graph with no edges at all is not an error: return whatever nodes
	// were collected above.
	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
		return nil, err
	}

	return resp, nil
}
|
|
|
|
|
2017-03-06 04:28:12 +03:00
|
|
|
func marshalDbEdge(edgeInfo *channeldb.ChannelEdgeInfo,
|
|
|
|
c1, c2 *channeldb.ChannelEdgePolicy) *lnrpc.ChannelEdge {
|
|
|
|
|
2017-01-18 00:20:06 +03:00
|
|
|
var (
|
2017-03-06 04:28:12 +03:00
|
|
|
lastUpdate int64
|
2017-01-18 00:20:06 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
if c2 != nil {
|
|
|
|
lastUpdate = c2.LastUpdate.Unix()
|
|
|
|
}
|
|
|
|
if c1 != nil {
|
|
|
|
lastUpdate = c1.LastUpdate.Unix()
|
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
|
|
|
|
edge := &lnrpc.ChannelEdge{
|
2017-03-06 04:28:12 +03:00
|
|
|
ChannelId: edgeInfo.ChannelID,
|
|
|
|
ChanPoint: edgeInfo.ChannelPoint.String(),
|
2017-01-18 00:20:06 +03:00
|
|
|
// TODO(roasbeef): update should be on edge info itself
|
|
|
|
LastUpdate: uint32(lastUpdate),
|
2018-01-31 07:30:00 +03:00
|
|
|
Node1Pub: hex.EncodeToString(edgeInfo.NodeKey1Bytes[:]),
|
|
|
|
Node2Pub: hex.EncodeToString(edgeInfo.NodeKey2Bytes[:]),
|
2017-03-06 04:28:12 +03:00
|
|
|
Capacity: int64(edgeInfo.Capacity),
|
2016-12-27 08:51:47 +03:00
|
|
|
}
|
|
|
|
|
2017-01-18 00:20:06 +03:00
|
|
|
if c1 != nil {
|
|
|
|
edge.Node1Policy = &lnrpc.RoutingPolicy{
|
2017-03-06 04:28:12 +03:00
|
|
|
TimeLockDelta: uint32(c1.TimeLockDelta),
|
2017-01-18 00:20:06 +03:00
|
|
|
MinHtlc: int64(c1.MinHTLC),
|
|
|
|
FeeBaseMsat: int64(c1.FeeBaseMSat),
|
|
|
|
FeeRateMilliMsat: int64(c1.FeeProportionalMillionths),
|
2018-06-14 05:38:41 +03:00
|
|
|
Disabled: c1.Flags&lnwire.ChanUpdateDisabled != 0,
|
2017-01-18 00:20:06 +03:00
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
}
|
|
|
|
|
2017-01-18 00:20:06 +03:00
|
|
|
if c2 != nil {
|
|
|
|
edge.Node2Policy = &lnrpc.RoutingPolicy{
|
2017-03-06 04:28:12 +03:00
|
|
|
TimeLockDelta: uint32(c2.TimeLockDelta),
|
2017-01-18 00:20:06 +03:00
|
|
|
MinHtlc: int64(c2.MinHTLC),
|
|
|
|
FeeBaseMsat: int64(c2.FeeBaseMSat),
|
|
|
|
FeeRateMilliMsat: int64(c2.FeeProportionalMillionths),
|
2018-06-14 05:38:41 +03:00
|
|
|
Disabled: c2.Flags&lnwire.ChanUpdateDisabled != 0,
|
2017-01-18 00:20:06 +03:00
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return edge
|
|
|
|
}
|
|
|
|
|
2017-08-04 15:57:48 +03:00
|
|
|
// GetChanInfo returns the latest authenticated network announcement for the
|
2016-12-27 08:51:47 +03:00
|
|
|
// given channel identified by its channel ID: an 8-byte integer which uniquely
|
|
|
|
// identifies the location of transaction's funding output within the block
|
|
|
|
// chain.
|
2017-08-22 09:25:41 +03:00
|
|
|
func (r *rpcServer) GetChanInfo(ctx context.Context,
|
|
|
|
in *lnrpc.ChanInfoRequest) (*lnrpc.ChannelEdge, error) {
|
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-03-06 04:28:12 +03:00
|
|
|
edgeInfo, edge1, edge2, err := graph.FetchChannelEdgesByID(in.ChanId)
|
2016-12-27 08:51:47 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Convert the database's edge format into the network/RPC edge format
|
|
|
|
// which couples the edge itself along with the directional node
|
|
|
|
// routing policies of each node involved within the channel.
|
2017-03-06 04:28:12 +03:00
|
|
|
channelEdge := marshalDbEdge(edgeInfo, edge1, edge2)
|
2016-12-27 08:51:47 +03:00
|
|
|
|
|
|
|
return channelEdge, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetNodeInfo returns the latest advertised and aggregate authenticated
|
|
|
|
// channel information for the specified node identified by its public key.
|
2017-08-22 09:25:41 +03:00
|
|
|
func (r *rpcServer) GetNodeInfo(ctx context.Context,
|
|
|
|
in *lnrpc.NodeInfoRequest) (*lnrpc.NodeInfo, error) {
|
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
|
|
|
|
|
|
|
// First, parse the hex-encoded public key into a full in-memory public
|
|
|
|
// key object we can work with for querying.
|
|
|
|
pubKeyBytes, err := hex.DecodeString(in.PubKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the public key decoded, attempt to fetch the node corresponding
|
|
|
|
// to this public key. If the node cannot be found, then an error will
|
|
|
|
// be returned.
|
|
|
|
node, err := graph.FetchLightningNode(pubKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the node obtained, we'll now iterate through all its out going
|
|
|
|
// edges to gather some basic statistics about its out going channels.
|
|
|
|
var (
|
2018-02-07 06:11:11 +03:00
|
|
|
numChannels uint32
|
|
|
|
totalCapacity btcutil.Amount
|
2016-12-27 08:51:47 +03:00
|
|
|
)
|
2017-04-14 23:17:51 +03:00
|
|
|
if err := node.ForEachChannel(nil, func(_ *bolt.Tx, edge *channeldb.ChannelEdgeInfo,
|
2017-08-22 09:25:41 +03:00
|
|
|
_, _ *channeldb.ChannelEdgePolicy) error {
|
2017-03-06 04:28:12 +03:00
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
numChannels++
|
2018-02-07 06:11:11 +03:00
|
|
|
totalCapacity += edge.Capacity
|
2016-12-27 08:51:47 +03:00
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2016-09-21 03:15:26 +03:00
|
|
|
|
2017-02-17 12:29:23 +03:00
|
|
|
nodeAddrs := make([]*lnrpc.NodeAddress, 0)
|
|
|
|
for _, addr := range node.Addresses {
|
|
|
|
nodeAddr := &lnrpc.NodeAddress{
|
|
|
|
Network: addr.Network(),
|
|
|
|
Addr: addr.String(),
|
|
|
|
}
|
|
|
|
nodeAddrs = append(nodeAddrs, nodeAddr)
|
|
|
|
}
|
2017-01-18 00:20:06 +03:00
|
|
|
// TODO(roasbeef): list channels as well?
|
2017-12-03 05:37:34 +03:00
|
|
|
|
|
|
|
nodeColor := fmt.Sprintf("#%02x%02x%02x", node.Color.R, node.Color.G, node.Color.B)
|
2016-12-27 08:51:47 +03:00
|
|
|
return &lnrpc.NodeInfo{
|
|
|
|
Node: &lnrpc.LightningNode{
|
|
|
|
LastUpdate: uint32(node.LastUpdate.Unix()),
|
|
|
|
PubKey: in.PubKey,
|
2017-02-17 12:29:23 +03:00
|
|
|
Addresses: nodeAddrs,
|
2016-12-27 08:51:47 +03:00
|
|
|
Alias: node.Alias,
|
2017-12-03 05:37:34 +03:00
|
|
|
Color: nodeColor,
|
2016-12-27 08:51:47 +03:00
|
|
|
},
|
|
|
|
NumChannels: numChannels,
|
2018-02-07 06:11:11 +03:00
|
|
|
TotalCapacity: int64(totalCapacity),
|
2016-12-27 08:51:47 +03:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2017-03-21 05:05:37 +03:00
|
|
|
// QueryRoutes attempts to query the daemons' Channel Router for a possible
// route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The returned route contains the full
// details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsulated
// within the HTLC.
//
// TODO(roasbeef): should return a slice of routes in reality
//  * create separate PR to send based on well formatted route
func (r *rpcServer) QueryRoutes(ctx context.Context,
	in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {

	// First parse the hex-encoded public key into a full public key object
	// we can properly manipulate.
	pubKeyBytes, err := hex.DecodeString(in.PubKey)
	if err != nil {
		return nil, err
	}
	pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256())
	if err != nil {
		return nil, err
	}

	// Currently, within the bootstrap phase of the network, we limit the
	// largest payment size allotted to (2^32) - 1 mSAT or 4.29 million
	// satoshis.
	amt := btcutil.Amount(in.Amt)
	amtMSat := lnwire.NewMSatFromSatoshis(amt)
	if amtMSat > maxPaymentMSat {
		return nil, fmt.Errorf("payment of %v is too large, max payment "+
			"allowed is %v", amt, maxPaymentMSat.ToSatoshis())
	}

	// Derive the effective fee ceiling for the route search from the
	// caller-supplied limit and the payment amount.
	feeLimit := calculateFeeLimit(in.FeeLimit, amtMSat)

	// Query the channel router for a possible path to the destination that
	// can carry `in.Amt` satoshis _including_ the total fee required on
	// the route.
	var (
		routes  []*routing.Route
		findErr error
	)
	// A zero FinalCltvDelta means the caller didn't specify one; the
	// variadic argument is omitted so the router can apply its own
	// default (presumably — confirm against FindRoutes' contract).
	if in.FinalCltvDelta == 0 {
		routes, findErr = r.server.chanRouter.FindRoutes(
			pubKey, amtMSat, feeLimit, uint32(in.NumRoutes),
		)
	} else {
		routes, findErr = r.server.chanRouter.FindRoutes(
			pubKey, amtMSat, feeLimit, uint32(in.NumRoutes),
			uint16(in.FinalCltvDelta),
		)
	}
	if findErr != nil {
		return nil, findErr
	}

	// As the number of returned routes can be less than the number of
	// requested routes, we'll clamp down the length of the response to the
	// minimum of the two.
	numRoutes := int32(len(routes))
	if in.NumRoutes < numRoutes {
		numRoutes = in.NumRoutes
	}

	// For each valid route, we'll convert the result into the format
	// required by the RPC system.
	routeResp := &lnrpc.QueryRoutesResponse{
		Routes: make([]*lnrpc.Route, 0, in.NumRoutes),
	}
	for i := int32(0); i < numRoutes; i++ {
		routeResp.Routes = append(
			routeResp.Routes, marshallRoute(routes[i]),
		)
	}

	return routeResp, nil
}
|
|
|
|
|
2017-12-11 04:02:36 +03:00
|
|
|
func marshallRoute(route *routing.Route) *lnrpc.Route {
|
2016-12-27 08:51:47 +03:00
|
|
|
resp := &lnrpc.Route{
|
|
|
|
TotalTimeLock: route.TotalTimeLock,
|
2017-08-22 09:25:41 +03:00
|
|
|
TotalFees: int64(route.TotalFees.ToSatoshis()),
|
2018-04-12 09:24:03 +03:00
|
|
|
TotalFeesMsat: int64(route.TotalFees),
|
2017-08-22 09:25:41 +03:00
|
|
|
TotalAmt: int64(route.TotalAmount.ToSatoshis()),
|
2018-04-12 09:24:03 +03:00
|
|
|
TotalAmtMsat: int64(route.TotalAmount),
|
2016-12-27 08:51:47 +03:00
|
|
|
Hops: make([]*lnrpc.Hop, len(route.Hops)),
|
|
|
|
}
|
|
|
|
for i, hop := range route.Hops {
|
|
|
|
resp.Hops[i] = &lnrpc.Hop{
|
2018-04-12 09:24:03 +03:00
|
|
|
ChanId: hop.Channel.ChannelID,
|
2018-06-04 23:10:05 +03:00
|
|
|
ChanCapacity: int64(hop.Channel.Bandwidth.ToSatoshis()),
|
2018-04-12 09:24:03 +03:00
|
|
|
AmtToForward: int64(hop.AmtToForward.ToSatoshis()),
|
|
|
|
AmtToForwardMsat: int64(hop.AmtToForward),
|
|
|
|
Fee: int64(hop.Fee.ToSatoshis()),
|
|
|
|
FeeMsat: int64(hop.Fee),
|
|
|
|
Expiry: uint32(hop.OutgoingTimeLock),
|
2016-12-27 08:51:47 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-17 07:41:32 +03:00
|
|
|
return resp
|
2016-12-27 08:51:47 +03:00
|
|
|
}
|
|
|
|
|
2018-05-01 11:17:55 +03:00
|
|
|
// unmarshallRoute converts an RPC route description back into the router's
// native *routing.Route, resolving each hop's channel against the local
// channel graph.
func unmarshallRoute(rpcroute *lnrpc.Route,
	graph *channeldb.ChannelGraph) (*routing.Route, error) {

	route := &routing.Route{
		TotalTimeLock: rpcroute.TotalTimeLock,
		TotalFees:     lnwire.MilliSatoshi(rpcroute.TotalFeesMsat),
		TotalAmount:   lnwire.MilliSatoshi(rpcroute.TotalAmtMsat),
		Hops:          make([]*routing.Hop, len(rpcroute.Hops)),
	}

	// The hop resolution below walks the route starting from our own
	// node, so fetch the graph's source node first.
	node, err := graph.SourceNode()
	if err != nil {
		return nil, fmt.Errorf("unable to fetch source node from graph "+
			"while unmarshaling route. %v", err)
	}

	for i, hop := range rpcroute.Hops {
		// Resolve the channel referenced by this hop, along with its
		// two directed policies.
		edgeInfo, c1, c2, err := graph.FetchChannelEdgesByID(hop.ChanId)
		if err != nil {
			return nil, fmt.Errorf("unable to fetch channel edges by "+
				"channel ID for hop (%d): %v", i, err)
		}

		var channelEdgePolicy *channeldb.ChannelEdgePolicy

		// Figure out which end of the channel the current cursor node
		// sits on: take the policy attached to the opposite end and
		// advance the cursor to that node for the next hop.
		//
		// NOTE(review): c1/c2 are dereferenced unconditionally here; a
		// channel missing one directed policy would panic — confirm
		// FetchChannelEdgesByID guarantees both are non-nil.
		switch {
		case bytes.Equal(node.PubKeyBytes[:], c1.Node.PubKeyBytes[:]):
			channelEdgePolicy = c2
			node = c2.Node
		case bytes.Equal(node.PubKeyBytes[:], c2.Node.PubKeyBytes[:]):
			channelEdgePolicy = c1
			node = c1.Node
		default:
			return nil, fmt.Errorf("could not find channel edge for hop=%d", i)
		}

		routingHop := &routing.ChannelHop{
			ChannelEdgePolicy: channelEdgePolicy,
			Bandwidth: lnwire.NewMSatFromSatoshis(
				btcutil.Amount(hop.ChanCapacity)),
			Chain: edgeInfo.ChainHash,
		}

		route.Hops[i] = &routing.Hop{
			Channel:          routingHop,
			OutgoingTimeLock: hop.Expiry,
			AmtToForward:     lnwire.MilliSatoshi(hop.AmtToForwardMsat),
			Fee:              lnwire.MilliSatoshi(hop.FeeMsat),
		}
	}

	return route, nil
}
|
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
// GetNetworkInfo returns some basic stats about the known channel graph from
|
|
|
|
// the PoV of the node.
|
2017-08-22 09:25:41 +03:00
|
|
|
func (r *rpcServer) GetNetworkInfo(ctx context.Context,
|
|
|
|
_ *lnrpc.NetworkInfoRequest) (*lnrpc.NetworkInfo, error) {
|
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
graph := r.server.chanDB.ChannelGraph()
|
|
|
|
|
|
|
|
var (
|
|
|
|
numNodes uint32
|
|
|
|
numChannels uint32
|
|
|
|
maxChanOut uint32
|
|
|
|
totalNetworkCapacity btcutil.Amount
|
|
|
|
minChannelSize btcutil.Amount = math.MaxInt64
|
|
|
|
maxChannelSize btcutil.Amount
|
|
|
|
)
|
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// We'll use this map to de-duplicate channels during our traversal.
|
|
|
|
// This is needed since channels are directional, so there will be two
|
|
|
|
// edges for each channel within the graph.
|
|
|
|
seenChans := make(map[uint64]struct{})
|
2016-12-27 08:51:47 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// We'll run through all the known nodes in the within our view of the
|
2016-12-27 08:51:47 +03:00
|
|
|
// network, tallying up the total number of nodes, and also gathering
|
2017-04-14 23:17:51 +03:00
|
|
|
// each node so we can measure the graph diameter and degree stats
|
2016-12-27 08:51:47 +03:00
|
|
|
// below.
|
2017-04-14 23:17:51 +03:00
|
|
|
if err := graph.ForEachNode(nil, func(tx *bolt.Tx, node *channeldb.LightningNode) error {
|
|
|
|
// Increment the total number of nodes with each iteration.
|
2016-12-27 08:51:47 +03:00
|
|
|
numNodes++
|
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// For each channel we'll compute the out degree of each node,
|
|
|
|
// and also update our running tallies of the min/max channel
|
|
|
|
// capacity, as well as the total channel capacity. We pass
|
|
|
|
// through the db transaction from the outer view so we can
|
|
|
|
// re-use it within this inner view.
|
2016-12-27 08:51:47 +03:00
|
|
|
var outDegree uint32
|
2017-04-14 23:17:51 +03:00
|
|
|
if err := node.ForEachChannel(tx, func(_ *bolt.Tx,
|
2017-08-22 09:25:41 +03:00
|
|
|
edge *channeldb.ChannelEdgeInfo, _, _ *channeldb.ChannelEdgePolicy) error {
|
2017-03-06 04:28:12 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// Bump up the out degree for this node for each
|
|
|
|
// channel encountered.
|
2016-12-27 08:51:47 +03:00
|
|
|
outDegree++
|
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// If we've already seen this channel, then we'll
|
|
|
|
// return early to ensure that we don't double-count
|
|
|
|
// stats.
|
|
|
|
if _, ok := seenChans[edge.ChannelID]; ok {
|
|
|
|
return nil
|
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// Compare the capacity of this channel against the
|
|
|
|
// running min/max to see if we should update the
|
|
|
|
// extrema.
|
|
|
|
chanCapacity := edge.Capacity
|
|
|
|
if chanCapacity < minChannelSize {
|
|
|
|
minChannelSize = chanCapacity
|
|
|
|
}
|
|
|
|
if chanCapacity > maxChannelSize {
|
|
|
|
maxChannelSize = chanCapacity
|
|
|
|
}
|
2017-03-06 04:28:12 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// Accumulate the total capacity of this channel to the
|
|
|
|
// network wide-capacity.
|
|
|
|
totalNetworkCapacity += chanCapacity
|
2016-12-27 08:51:47 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
numChannels++
|
2016-12-27 08:51:47 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
seenChans[edge.ChannelID] = struct{}{}
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
// Finally, if the out degree of this node is greater than what
|
|
|
|
// we've seen so far, update the maxChanOut variable.
|
|
|
|
if outDegree > maxChanOut {
|
|
|
|
maxChanOut = outDegree
|
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-05-25 03:28:35 +03:00
|
|
|
// If we don't have any channels, then reset the minChannelSize to zero
|
2017-12-03 05:37:34 +03:00
|
|
|
// to avoid outputting NaN in encoded JSON.
|
2017-05-25 03:28:35 +03:00
|
|
|
if numChannels == 0 {
|
|
|
|
minChannelSize = 0
|
|
|
|
}
|
|
|
|
|
2017-01-30 02:02:57 +03:00
|
|
|
// TODO(roasbeef): graph diameter
|
|
|
|
|
2016-12-27 08:51:47 +03:00
|
|
|
// TODO(roasbeef): also add oldest channel?
|
2017-01-18 00:20:06 +03:00
|
|
|
// * also add median channel size
|
2017-05-25 03:28:35 +03:00
|
|
|
netInfo := &lnrpc.NetworkInfo{
|
2016-12-27 08:51:47 +03:00
|
|
|
MaxOutDegree: maxChanOut,
|
|
|
|
AvgOutDegree: float64(numChannels) / float64(numNodes),
|
|
|
|
NumNodes: numNodes,
|
|
|
|
NumChannels: numChannels,
|
|
|
|
TotalNetworkCapacity: int64(totalNetworkCapacity),
|
|
|
|
AvgChannelSize: float64(totalNetworkCapacity) / float64(numChannels),
|
2017-05-25 03:28:35 +03:00
|
|
|
|
|
|
|
MinChannelSize: int64(minChannelSize),
|
|
|
|
MaxChannelSize: int64(maxChannelSize),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Similarly, if we don't have any channels, then we'll also set the
|
|
|
|
// average channel size to zero in order to avoid weird JSON encoding
|
|
|
|
// outputs.
|
|
|
|
if numChannels == 0 {
|
|
|
|
netInfo.AvgChannelSize = 0
|
|
|
|
}
|
|
|
|
|
|
|
|
return netInfo, nil
|
2016-07-15 14:02:59 +03:00
|
|
|
}
|
2016-12-05 14:59:36 +03:00
|
|
|
|
2017-05-12 00:55:56 +03:00
|
|
|
// StopDaemon will send a shutdown request to the interrupt handler, triggering
// a graceful shutdown of the daemon.
func (r *rpcServer) StopDaemon(ctx context.Context,
	_ *lnrpc.StopRequest) (*lnrpc.StopResponse, error) {

	// Hand the request off to the process-wide signal handler; this call
	// only requests shutdown — the teardown itself is driven by the
	// signal package after this RPC returns.
	signal.RequestShutdown()
	return &lnrpc.StopResponse{}, nil
}
|
|
|
|
|
2017-03-14 06:39:16 +03:00
|
|
|
// SubscribeChannelGraph launches a streaming RPC that allows the caller to
|
|
|
|
// receive notifications upon any changes the channel graph topology from the
|
|
|
|
// review of the responding node. Events notified include: new nodes coming
|
|
|
|
// online, nodes updating their authenticated attributes, new channels being
|
|
|
|
// advertised, updates in the routing policy for a directional channel edge,
|
|
|
|
// and finally when prior channels are closed on-chain.
|
|
|
|
func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
|
|
|
|
updateStream lnrpc.Lightning_SubscribeChannelGraphServer) error {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-03-14 06:39:16 +03:00
|
|
|
// First, we start by subscribing to a new intent to receive
|
|
|
|
// notifications from the channel router.
|
|
|
|
client, err := r.server.chanRouter.SubscribeTopology()
|
|
|
|
if err != nil {
|
2017-03-15 06:06:33 +03:00
|
|
|
return err
|
2017-03-14 06:39:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that the resources for the topology update client is cleaned
|
|
|
|
// up once either the server, or client exists.
|
|
|
|
defer client.Cancel()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
|
|
|
|
// A new update has been sent by the channel router, we'll
|
2017-03-15 06:06:33 +03:00
|
|
|
// marshal it into the form expected by the gRPC client, then
|
2017-03-14 06:39:16 +03:00
|
|
|
// send it off.
|
|
|
|
case topChange, ok := <-client.TopologyChanges:
|
|
|
|
// If the second value from the channel read is nil,
|
|
|
|
// then this means that the channel router is exiting
|
|
|
|
// or the notification client was cancelled. So we'll
|
|
|
|
// exit early.
|
|
|
|
if !ok {
|
2017-05-12 00:55:56 +03:00
|
|
|
return errors.New("server shutting down")
|
2017-03-14 06:39:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Convert the struct from the channel router into the
|
|
|
|
// form expected by the gRPC service then send it off
|
|
|
|
// to the client.
|
|
|
|
graphUpdate := marshallTopologyChange(topChange)
|
|
|
|
if err := updateStream.Send(graphUpdate); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// The server is quitting, so we'll exit immediately. Returning
|
|
|
|
// nil will close the clients read end of the stream.
|
|
|
|
case <-r.quit:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// marshallTopologyChange performs a mapping from the topology change struct
|
2017-03-14 06:39:16 +03:00
|
|
|
// returned by the router to the form of notifications expected by the current
|
|
|
|
// gRPC service.
|
|
|
|
func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopologyUpdate {
|
|
|
|
|
|
|
|
// encodeKey is a simple helper function that converts a live public
|
|
|
|
// key into a hex-encoded version of the compressed serialization for
|
|
|
|
// the public key.
|
|
|
|
encodeKey := func(k *btcec.PublicKey) string {
|
|
|
|
return hex.EncodeToString(k.SerializeCompressed())
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeUpdates := make([]*lnrpc.NodeUpdate, len(topChange.NodeUpdates))
|
|
|
|
for i, nodeUpdate := range topChange.NodeUpdates {
|
|
|
|
addrs := make([]string, len(nodeUpdate.Addresses))
|
|
|
|
for i, addr := range nodeUpdate.Addresses {
|
|
|
|
addrs[i] = addr.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeUpdates[i] = &lnrpc.NodeUpdate{
|
|
|
|
Addresses: addrs,
|
|
|
|
IdentityKey: encodeKey(nodeUpdate.IdentityKey),
|
|
|
|
GlobalFeatures: nodeUpdate.GlobalFeatures,
|
|
|
|
Alias: nodeUpdate.Alias,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
channelUpdates := make([]*lnrpc.ChannelEdgeUpdate, len(topChange.ChannelEdgeUpdates))
|
|
|
|
for i, channelUpdate := range topChange.ChannelEdgeUpdates {
|
|
|
|
channelUpdates[i] = &lnrpc.ChannelEdgeUpdate{
|
|
|
|
ChanId: channelUpdate.ChanID,
|
|
|
|
ChanPoint: &lnrpc.ChannelPoint{
|
2018-01-11 07:59:30 +03:00
|
|
|
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
|
|
|
|
FundingTxidBytes: channelUpdate.ChanPoint.Hash[:],
|
|
|
|
},
|
2017-03-14 06:39:16 +03:00
|
|
|
OutputIndex: channelUpdate.ChanPoint.Index,
|
|
|
|
},
|
|
|
|
Capacity: int64(channelUpdate.Capacity),
|
|
|
|
RoutingPolicy: &lnrpc.RoutingPolicy{
|
|
|
|
TimeLockDelta: uint32(channelUpdate.TimeLockDelta),
|
|
|
|
MinHtlc: int64(channelUpdate.MinHTLC),
|
|
|
|
FeeBaseMsat: int64(channelUpdate.BaseFee),
|
|
|
|
FeeRateMilliMsat: int64(channelUpdate.FeeRate),
|
2018-06-14 05:38:41 +03:00
|
|
|
Disabled: channelUpdate.Disabled,
|
2017-03-14 06:39:16 +03:00
|
|
|
},
|
|
|
|
AdvertisingNode: encodeKey(channelUpdate.AdvertisingNode),
|
|
|
|
ConnectingNode: encodeKey(channelUpdate.ConnectingNode),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
closedChans := make([]*lnrpc.ClosedChannelUpdate, len(topChange.ClosedChannels))
|
|
|
|
for i, closedChan := range topChange.ClosedChannels {
|
|
|
|
closedChans[i] = &lnrpc.ClosedChannelUpdate{
|
|
|
|
ChanId: closedChan.ChanID,
|
|
|
|
Capacity: int64(closedChan.Capacity),
|
|
|
|
ClosedHeight: closedChan.ClosedHeight,
|
|
|
|
ChanPoint: &lnrpc.ChannelPoint{
|
2018-01-11 07:59:30 +03:00
|
|
|
FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{
|
|
|
|
FundingTxidBytes: closedChan.ChanPoint.Hash[:],
|
|
|
|
},
|
2017-03-14 06:39:16 +03:00
|
|
|
OutputIndex: closedChan.ChanPoint.Index,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &lnrpc.GraphTopologyUpdate{
|
|
|
|
NodeUpdates: nodeUpdates,
|
|
|
|
ChannelUpdates: channelUpdates,
|
|
|
|
ClosedChans: closedChans,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-05 14:59:36 +03:00
|
|
|
// ListPayments returns a list of all outgoing payments.
|
2017-08-18 04:50:57 +03:00
|
|
|
func (r *rpcServer) ListPayments(ctx context.Context,
|
|
|
|
_ *lnrpc.ListPaymentsRequest) (*lnrpc.ListPaymentsResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2016-12-05 14:59:36 +03:00
|
|
|
rpcsLog.Debugf("[ListPayments]")
|
2016-12-21 12:19:01 +03:00
|
|
|
|
2016-12-05 14:59:36 +03:00
|
|
|
payments, err := r.server.chanDB.FetchAllPayments()
|
2017-02-05 03:52:25 +03:00
|
|
|
if err != nil && err != channeldb.ErrNoPaymentsCreated {
|
2016-12-05 14:59:36 +03:00
|
|
|
return nil, err
|
|
|
|
}
|
2016-12-21 12:19:01 +03:00
|
|
|
|
2016-12-05 14:59:36 +03:00
|
|
|
paymentsResp := &lnrpc.ListPaymentsResponse{
|
|
|
|
Payments: make([]*lnrpc.Payment, len(payments)),
|
|
|
|
}
|
2016-12-31 03:41:59 +03:00
|
|
|
for i, payment := range payments {
|
|
|
|
path := make([]string, len(payment.Path))
|
|
|
|
for i, hop := range payment.Path {
|
|
|
|
path[i] = hex.EncodeToString(hop[:])
|
|
|
|
}
|
|
|
|
|
2017-12-14 04:04:18 +03:00
|
|
|
paymentHash := sha256.Sum256(payment.PaymentPreimage[:])
|
2016-12-31 03:41:59 +03:00
|
|
|
paymentsResp.Payments[i] = &lnrpc.Payment{
|
2017-12-14 04:04:18 +03:00
|
|
|
PaymentHash: hex.EncodeToString(paymentHash[:]),
|
|
|
|
Value: int64(payment.Terms.Value.ToSatoshis()),
|
|
|
|
CreationDate: payment.CreationDate.Unix(),
|
|
|
|
Path: path,
|
2018-03-06 21:21:03 +03:00
|
|
|
Fee: int64(payment.Fee.ToSatoshis()),
|
2017-12-14 04:04:18 +03:00
|
|
|
PaymentPreimage: hex.EncodeToString(payment.PaymentPreimage[:]),
|
2016-12-05 14:59:36 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return paymentsResp, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteAllPayments deletes all outgoing payments from DB.
|
2017-08-18 04:50:57 +03:00
|
|
|
func (r *rpcServer) DeleteAllPayments(ctx context.Context,
|
|
|
|
_ *lnrpc.DeleteAllPaymentsRequest) (*lnrpc.DeleteAllPaymentsResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2016-12-05 14:59:36 +03:00
|
|
|
rpcsLog.Debugf("[DeleteAllPayments]")
|
2016-12-21 12:19:01 +03:00
|
|
|
|
2016-12-31 03:41:59 +03:00
|
|
|
if err := r.server.chanDB.DeleteAllPayments(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &lnrpc.DeleteAllPaymentsResponse{}, nil
|
2016-12-27 08:51:18 +03:00
|
|
|
}
|
2016-12-27 08:51:47 +03:00
|
|
|
|
2017-01-15 05:16:53 +03:00
|
|
|
// DebugLevel allows a caller to programmatically set the logging verbosity of
|
|
|
|
// lnd. The logging can be targeted according to a coarse daemon-wide logging
|
|
|
|
// level, or in a granular fashion to specify the logging for a target
|
|
|
|
// sub-system.
|
|
|
|
func (r *rpcServer) DebugLevel(ctx context.Context,
|
|
|
|
req *lnrpc.DebugLevelRequest) (*lnrpc.DebugLevelResponse, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
2017-01-15 05:16:53 +03:00
|
|
|
// If show is set, then we simply print out the list of available
|
|
|
|
// sub-systems.
|
|
|
|
if req.Show {
|
|
|
|
return &lnrpc.DebugLevelResponse{
|
|
|
|
SubSystems: strings.Join(supportedSubsystems(), " "),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
rpcsLog.Infof("[debuglevel] changing debug level to: %v", req.LevelSpec)
|
|
|
|
|
|
|
|
// Otherwise, we'll attempt to set the logging level using the
|
|
|
|
// specified level spec.
|
|
|
|
if err := parseAndSetDebugLevels(req.LevelSpec); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &lnrpc.DebugLevelResponse{}, nil
|
|
|
|
}
|
2017-01-18 00:39:30 +03:00
|
|
|
|
|
|
|
// DecodePayReq takes an encoded payment request string and attempts to decode
|
|
|
|
// it, returning a full description of the conditions encoded within the
|
|
|
|
// payment request.
|
|
|
|
func (r *rpcServer) DecodePayReq(ctx context.Context,
|
|
|
|
req *lnrpc.PayReqString) (*lnrpc.PayReq, error) {
|
2017-08-22 09:25:41 +03:00
|
|
|
|
|
|
|
rpcsLog.Tracef("[decodepayreq] decoding: %v", req.PayReq)
|
|
|
|
|
2017-01-18 00:39:30 +03:00
|
|
|
// Fist we'll attempt to decode the payment request string, if the
|
|
|
|
// request is invalid or the checksum doesn't match, then we'll exit
|
|
|
|
// here with an error.
|
2018-02-19 18:20:54 +03:00
|
|
|
payReq, err := zpay32.Decode(req.PayReq, activeNetParams.Params)
|
2017-01-18 00:39:30 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
// Let the fields default to empty strings.
|
|
|
|
desc := ""
|
|
|
|
if payReq.Description != nil {
|
|
|
|
desc = *payReq.Description
|
|
|
|
}
|
|
|
|
|
|
|
|
descHash := []byte("")
|
|
|
|
if payReq.DescriptionHash != nil {
|
|
|
|
descHash = payReq.DescriptionHash[:]
|
|
|
|
}
|
|
|
|
|
|
|
|
fallbackAddr := ""
|
|
|
|
if payReq.FallbackAddr != nil {
|
|
|
|
fallbackAddr = payReq.FallbackAddr.String()
|
|
|
|
}
|
|
|
|
|
|
|
|
// Expiry time will default to 3600 seconds if not specified
|
|
|
|
// explicitly.
|
|
|
|
expiry := int64(payReq.Expiry().Seconds())
|
|
|
|
|
2018-03-28 07:01:21 +03:00
|
|
|
// Convert between the `lnrpc` and `routing` types.
|
|
|
|
routeHints := createRPCRouteHints(payReq.RouteHints)
|
|
|
|
|
2017-09-05 19:08:02 +03:00
|
|
|
amt := int64(0)
|
|
|
|
if payReq.MilliSat != nil {
|
|
|
|
amt = int64(payReq.MilliSat.ToSatoshis())
|
|
|
|
}
|
|
|
|
|
2017-01-18 00:39:30 +03:00
|
|
|
dest := payReq.Destination.SerializeCompressed()
|
|
|
|
return &lnrpc.PayReq{
|
2017-09-05 19:08:02 +03:00
|
|
|
Destination: hex.EncodeToString(dest),
|
|
|
|
PaymentHash: hex.EncodeToString(payReq.PaymentHash[:]),
|
|
|
|
NumSatoshis: amt,
|
|
|
|
Timestamp: payReq.Timestamp.Unix(),
|
|
|
|
Description: desc,
|
|
|
|
DescriptionHash: hex.EncodeToString(descHash[:]),
|
|
|
|
FallbackAddr: fallbackAddr,
|
|
|
|
Expiry: expiry,
|
2017-10-19 08:16:40 +03:00
|
|
|
CltvExpiry: int64(payReq.MinFinalCLTVExpiry()),
|
2018-03-28 07:01:21 +03:00
|
|
|
RouteHints: routeHints,
|
2017-01-18 00:39:30 +03:00
|
|
|
}, nil
|
|
|
|
}
|
2017-08-22 10:09:43 +03:00
|
|
|
|
|
|
|
// feeBase is the fixed point that fee rate computations are performed over.
// Nodes on the network advertise their fee rate using this point as a base.
// This means that the minimal possible fee rate is 1e-6, or 0.000001, or
// 0.0001%.
const feeBase = 1000000
|
|
|
|
|
|
|
|
// FeeReport allows the caller to obtain a report detailing the current fee
|
|
|
|
// schedule enforced by the node globally for each channel.
|
|
|
|
func (r *rpcServer) FeeReport(ctx context.Context,
|
|
|
|
_ *lnrpc.FeeReportRequest) (*lnrpc.FeeReportResponse, error) {
|
|
|
|
|
|
|
|
// TODO(roasbeef): use UnaryInterceptor to add automated logging
|
|
|
|
|
2018-02-28 09:23:09 +03:00
|
|
|
rpcsLog.Debugf("[feereport]")
|
|
|
|
|
2017-08-22 10:09:43 +03:00
|
|
|
channelGraph := r.server.chanDB.ChannelGraph()
|
|
|
|
selfNode, err := channelGraph.SourceNode()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var feeReports []*lnrpc.ChannelFeeReport
|
|
|
|
err = selfNode.ForEachChannel(nil, func(_ *bolt.Tx, chanInfo *channeldb.ChannelEdgeInfo,
|
|
|
|
edgePolicy, _ *channeldb.ChannelEdgePolicy) error {
|
|
|
|
|
2018-06-18 13:35:22 +03:00
|
|
|
// Self node should always have policies for its channels.
|
|
|
|
if edgePolicy == nil {
|
|
|
|
return fmt.Errorf("no policy for outgoing channel %v ",
|
|
|
|
chanInfo.ChannelID)
|
|
|
|
}
|
|
|
|
|
2017-08-22 10:09:43 +03:00
|
|
|
// We'll compute the effective fee rate by converting from a
|
|
|
|
// fixed point fee rate to a floating point fee rate. The fee
|
|
|
|
// rate field in the database the amount of mSAT charged per
|
|
|
|
// 1mil mSAT sent, so will divide by this to get the proper fee
|
|
|
|
// rate.
|
|
|
|
feeRateFixedPoint := edgePolicy.FeeProportionalMillionths
|
|
|
|
feeRate := float64(feeRateFixedPoint) / float64(feeBase)
|
|
|
|
|
|
|
|
// TODO(roasbeef): also add stats for revenue for each channel
|
|
|
|
feeReports = append(feeReports, &lnrpc.ChannelFeeReport{
|
|
|
|
ChanPoint: chanInfo.ChannelPoint.String(),
|
|
|
|
BaseFeeMsat: int64(edgePolicy.FeeBaseMSat),
|
|
|
|
FeePerMil: int64(feeRateFixedPoint),
|
|
|
|
FeeRate: feeRate,
|
|
|
|
})
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-02-28 09:23:09 +03:00
|
|
|
fwdEventLog := r.server.chanDB.ForwardingLog()
|
|
|
|
|
|
|
|
// computeFeeSum is a helper function that computes the total fees for
|
|
|
|
// a particular time slice described by a forwarding event query.
|
|
|
|
computeFeeSum := func(query channeldb.ForwardingEventQuery) (lnwire.MilliSatoshi, error) {
|
|
|
|
|
|
|
|
var totalFees lnwire.MilliSatoshi
|
|
|
|
|
|
|
|
// We'll continue to fetch the next query and accumulate the
|
|
|
|
// fees until the next query returns no events.
|
|
|
|
for {
|
|
|
|
timeSlice, err := fwdEventLog.Query(query)
|
|
|
|
if err != nil {
|
|
|
|
return 0, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the timeslice is empty, then we'll return as
|
|
|
|
// we've retrieved all the entries in this range.
|
|
|
|
if len(timeSlice.ForwardingEvents) == 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll tally up an accumulate the total
|
|
|
|
// fees for this time slice.
|
|
|
|
for _, event := range timeSlice.ForwardingEvents {
|
|
|
|
fee := event.AmtIn - event.AmtOut
|
|
|
|
totalFees += fee
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll now take the last offset index returned as
|
|
|
|
// part of this response, and modify our query to start
|
|
|
|
// at this index. This has a pagination effect in the
|
|
|
|
// case that our query bounds has more than 100k
|
|
|
|
// entries.
|
|
|
|
query.IndexOffset = timeSlice.LastIndexOffset
|
|
|
|
}
|
|
|
|
|
|
|
|
return totalFees, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
now := time.Now()
|
|
|
|
|
|
|
|
// Before we perform the queries below, we'll instruct the switch to
|
|
|
|
// flush any pending events to disk. This ensure we get a complete
|
|
|
|
// snapshot at this particular time.
|
|
|
|
if r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to flush forwarding "+
|
|
|
|
"events: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// In addition to returning the current fee schedule for each channel.
|
|
|
|
// We'll also perform a series of queries to obtain the total fees
|
|
|
|
// earned over the past day, week, and month.
|
|
|
|
dayQuery := channeldb.ForwardingEventQuery{
|
|
|
|
StartTime: now.Add(-time.Hour * 24),
|
|
|
|
EndTime: now,
|
|
|
|
NumMaxEvents: 1000,
|
|
|
|
}
|
|
|
|
dayFees, err := computeFeeSum(dayQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to retrieve day fees: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
weekQuery := channeldb.ForwardingEventQuery{
|
|
|
|
StartTime: now.Add(-time.Hour * 24 * 7),
|
|
|
|
EndTime: now,
|
|
|
|
NumMaxEvents: 1000,
|
|
|
|
}
|
|
|
|
weekFees, err := computeFeeSum(weekQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to retrieve day fees: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
monthQuery := channeldb.ForwardingEventQuery{
|
|
|
|
StartTime: now.Add(-time.Hour * 24 * 30),
|
|
|
|
EndTime: now,
|
|
|
|
NumMaxEvents: 1000,
|
|
|
|
}
|
|
|
|
monthFees, err := computeFeeSum(monthQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to retrieve day fees: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-08-22 11:00:07 +03:00
|
|
|
return &lnrpc.FeeReportResponse{
|
|
|
|
ChannelFees: feeReports,
|
2018-02-28 09:23:09 +03:00
|
|
|
DayFeeSum: uint64(dayFees.ToSatoshis()),
|
|
|
|
WeekFeeSum: uint64(weekFees.ToSatoshis()),
|
|
|
|
MonthFeeSum: uint64(monthFees.ToSatoshis()),
|
2017-08-22 11:00:07 +03:00
|
|
|
}, nil
|
2017-08-22 10:09:43 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// minFeeRate is the smallest permitted fee rate within the network. This is
// derived from the fact that fee rates are computed using a fixed point of
// 1,000,000 (feeBase). As a result, the smallest representable fee rate is
// 1e-6, or 0.000001, or 0.0001%.
const minFeeRate = 1e-6
|
|
|
|
|
2017-12-17 01:14:58 +03:00
|
|
|
// UpdateChannelPolicy allows the caller to update the channel forwarding policy
|
|
|
|
// for all channels globally, or a particular channel.
|
|
|
|
func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
|
|
|
|
req *lnrpc.PolicyUpdateRequest) (*lnrpc.PolicyUpdateResponse, error) {
|
2017-08-22 10:09:43 +03:00
|
|
|
|
|
|
|
var targetChans []wire.OutPoint
|
|
|
|
switch scope := req.Scope.(type) {
|
|
|
|
// If the request is targeting all active channels, then we don't need
|
|
|
|
// target any channels by their channel point.
|
2017-12-17 01:14:58 +03:00
|
|
|
case *lnrpc.PolicyUpdateRequest_Global:
|
2017-08-22 10:09:43 +03:00
|
|
|
|
|
|
|
// Otherwise, we're targeting an individual channel by its channel
|
|
|
|
// point.
|
2017-12-17 01:14:58 +03:00
|
|
|
case *lnrpc.PolicyUpdateRequest_ChanPoint:
|
2018-01-11 07:59:30 +03:00
|
|
|
txidHash, err := getChanPointFundingTxid(scope.ChanPoint)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
txid, err := chainhash.NewHash(txidHash)
|
2017-08-22 10:09:43 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
targetChans = append(targetChans, wire.OutPoint{
|
|
|
|
Hash: *txid,
|
|
|
|
Index: scope.ChanPoint.OutputIndex,
|
|
|
|
})
|
|
|
|
default:
|
|
|
|
return nil, fmt.Errorf("unknown scope: %v", scope)
|
|
|
|
}
|
|
|
|
|
|
|
|
// As a sanity check, we'll ensure that the passed fee rate is below
|
2017-12-17 01:14:58 +03:00
|
|
|
// 1e-6, or the lowest allowed fee rate, and that the passed timelock
|
|
|
|
// is large enough.
|
2017-08-22 10:09:43 +03:00
|
|
|
if req.FeeRate < minFeeRate {
|
|
|
|
return nil, fmt.Errorf("fee rate of %v is too small, min fee "+
|
|
|
|
"rate is %v", req.FeeRate, minFeeRate)
|
|
|
|
}
|
|
|
|
|
2017-12-17 01:14:58 +03:00
|
|
|
if req.TimeLockDelta < minTimeLockDelta {
|
|
|
|
return nil, fmt.Errorf("time lock delta of %v is too small, "+
|
|
|
|
"minimum supported is %v", req.TimeLockDelta,
|
|
|
|
minTimeLockDelta)
|
|
|
|
}
|
|
|
|
|
2017-08-22 10:09:43 +03:00
|
|
|
// We'll also need to convert the floating point fee rate we accept
|
|
|
|
// over RPC to the fixed point rate that we use within the protocol. We
|
|
|
|
// do this by multiplying the passed fee rate by the fee base. This
|
|
|
|
// gives us the fixed point, scaled by 1 million that's used within the
|
|
|
|
// protocol.
|
|
|
|
feeRateFixed := uint32(req.FeeRate * feeBase)
|
|
|
|
baseFeeMsat := lnwire.MilliSatoshi(req.BaseFeeMsat)
|
|
|
|
feeSchema := routing.FeeSchema{
|
|
|
|
BaseFee: baseFeeMsat,
|
|
|
|
FeeRate: feeRateFixed,
|
|
|
|
}
|
|
|
|
|
2017-12-17 01:14:58 +03:00
|
|
|
chanPolicy := routing.ChannelPolicy{
|
|
|
|
FeeSchema: feeSchema,
|
|
|
|
TimeLockDelta: req.TimeLockDelta,
|
|
|
|
}
|
|
|
|
|
2018-04-04 06:18:42 +03:00
|
|
|
rpcsLog.Debugf("[updatechanpolicy] updating channel policy base_fee=%v, "+
|
2017-12-17 01:14:58 +03:00
|
|
|
"rate_float=%v, rate_fixed=%v, time_lock_delta: %v, targets=%v",
|
|
|
|
req.BaseFeeMsat, req.FeeRate, feeRateFixed, req.TimeLockDelta,
|
2017-08-22 10:09:43 +03:00
|
|
|
spew.Sdump(targetChans))
|
|
|
|
|
|
|
|
// With the scope resolved, we'll now send this to the
|
2017-12-17 01:14:58 +03:00
|
|
|
// AuthenticatedGossiper so it can propagate the new policy for our
|
2017-08-22 10:09:43 +03:00
|
|
|
// target channel(s).
|
2017-12-17 01:14:58 +03:00
|
|
|
err := r.server.authGossiper.PropagateChanPolicyUpdate(
|
|
|
|
chanPolicy, targetChans...,
|
2017-08-22 10:09:43 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, we'll apply the set of active links amongst the target
|
|
|
|
// channels.
|
|
|
|
//
|
|
|
|
// We create a partially policy as the logic won't overwrite a valid
|
|
|
|
// sub-policy with a "nil" one.
|
|
|
|
p := htlcswitch.ForwardingPolicy{
|
2017-12-17 01:14:58 +03:00
|
|
|
BaseFee: baseFeeMsat,
|
|
|
|
FeeRate: lnwire.MilliSatoshi(feeRateFixed),
|
|
|
|
TimeLockDelta: req.TimeLockDelta,
|
2017-08-22 10:09:43 +03:00
|
|
|
}
|
|
|
|
err = r.server.htlcSwitch.UpdateForwardingPolicies(p, targetChans...)
|
|
|
|
if err != nil {
|
|
|
|
// If we're unable update the fees due to the links not being
|
|
|
|
// online, then we don't need to fail the call. We'll simply
|
|
|
|
// log the failure.
|
|
|
|
rpcsLog.Warnf("Unable to update link fees: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-12-17 01:14:58 +03:00
|
|
|
return &lnrpc.PolicyUpdateResponse{}, nil
|
2017-08-22 10:09:43 +03:00
|
|
|
}
|
2018-02-28 09:23:27 +03:00
|
|
|
|
|
|
|
// ForwardingHistory allows the caller to query the htlcswitch for a record of
|
|
|
|
// all HTLC's forwarded within the target time range, and integer offset within
|
|
|
|
// that time range. If no time-range is specified, then the first chunk of the
|
|
|
|
// past 24 hrs of forwarding history are returned.
|
|
|
|
|
|
|
|
// A list of forwarding events are returned. The size of each forwarding event
|
|
|
|
// is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.
|
|
|
|
// In order to safely stay under this max limit, we'll return 50k events per
|
|
|
|
// response. Each response has the index offset of the last entry. The index
|
|
|
|
// offset can be provided to the request to allow the caller to skip a series
|
|
|
|
// of records.
|
|
|
|
func (r *rpcServer) ForwardingHistory(ctx context.Context,
|
|
|
|
req *lnrpc.ForwardingHistoryRequest) (*lnrpc.ForwardingHistoryResponse, error) {
|
|
|
|
|
|
|
|
rpcsLog.Debugf("[forwardinghistory]")
|
|
|
|
|
|
|
|
// Before we perform the queries below, we'll instruct the switch to
|
|
|
|
// flush any pending events to disk. This ensure we get a complete
|
|
|
|
// snapshot at this particular time.
|
|
|
|
if err := r.server.htlcSwitch.FlushForwardingEvents(); err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to flush forwarding "+
|
|
|
|
"events: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
startTime, endTime time.Time
|
|
|
|
|
|
|
|
numEvents uint32
|
|
|
|
)
|
|
|
|
|
|
|
|
// If the start and end time were not set, then we'll just return the
|
|
|
|
// records over the past 24 hours.
|
|
|
|
if req.StartTime == 0 && req.EndTime == 0 {
|
|
|
|
now := time.Now()
|
|
|
|
startTime = now.Add(-time.Hour * 24)
|
|
|
|
endTime = now
|
|
|
|
} else {
|
|
|
|
startTime = time.Unix(int64(req.StartTime), 0)
|
|
|
|
endTime = time.Unix(int64(req.EndTime), 0)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the number of events wasn't specified, then we'll default to
|
|
|
|
// returning the last 100 events.
|
|
|
|
numEvents = req.NumMaxEvents
|
|
|
|
if numEvents == 0 {
|
|
|
|
numEvents = 100
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll map the proto request into a format the is understood by
|
|
|
|
// the forwarding log.
|
|
|
|
eventQuery := channeldb.ForwardingEventQuery{
|
|
|
|
StartTime: startTime,
|
|
|
|
EndTime: endTime,
|
|
|
|
IndexOffset: req.IndexOffset,
|
|
|
|
NumMaxEvents: numEvents,
|
|
|
|
}
|
|
|
|
timeSlice, err := r.server.chanDB.ForwardingLog().Query(eventQuery)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to query forwarding log: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(roasbeef): add settlement latency?
|
|
|
|
// * use FPE on all records?
|
|
|
|
|
|
|
|
// With the events retrieved, we'll now map them into the proper proto
|
|
|
|
// response.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): show in ns for the outside?
|
|
|
|
resp := &lnrpc.ForwardingHistoryResponse{
|
|
|
|
ForwardingEvents: make([]*lnrpc.ForwardingEvent, len(timeSlice.ForwardingEvents)),
|
|
|
|
LastOffsetIndex: timeSlice.LastIndexOffset,
|
|
|
|
}
|
|
|
|
for i, event := range timeSlice.ForwardingEvents {
|
|
|
|
amtInSat := event.AmtIn.ToSatoshis()
|
|
|
|
amtOutSat := event.AmtOut.ToSatoshis()
|
|
|
|
|
|
|
|
resp.ForwardingEvents[i] = &lnrpc.ForwardingEvent{
|
|
|
|
Timestamp: uint64(event.Timestamp.Unix()),
|
|
|
|
ChanIdIn: event.IncomingChanID.ToUint64(),
|
|
|
|
ChanIdOut: event.OutgoingChanID.ToUint64(),
|
|
|
|
AmtIn: uint64(amtInSat),
|
|
|
|
AmtOut: uint64(amtOutSat),
|
|
|
|
Fee: uint64(amtInSat - amtOutSat),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, nil
|
|
|
|
}
|