multi: move Route to sub-pkg routing/route
Commit ee257fd0eb (parent 42b081bb37)
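For orientation, a minimal sketch (not part of this commit; the variable names are illustrative) of how calling code changes once Vertex moves from the routing package to the new routing/route sub-package:

```go
package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcec"
	// These types were previously referenced as routing.Vertex,
	// routing.NewVertex, routing.Route, and so on.
	"github.com/lightningnetwork/lnd/routing/route"
)

func main() {
	// A throwaway key standing in for a peer's identity public key.
	priv, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		panic(err)
	}

	// routing.NewVertex(pub) becomes route.NewVertex(pub).
	v := route.NewVertex(priv.PubKey())

	// Maps keyed by routing.Vertex become maps keyed by route.Vertex.
	seen := make(map[route.Vertex]struct{})
	seen[v] = struct{}{}

	// Vertex.String() still hex-encodes the compressed public key.
	fmt.Println(v.String())
}
```

In the hunks below, removed lines are prefixed with `-` and added lines with `+`.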
@@ -6,7 +6,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
- "github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"
)

// ChannelGraphTimeSeries is an interface that provides time and block based
@@ -247,7 +247,7 @@ func (c *ChanSeries) FetchChanAnns(chain chainhash.Hash,
// We'll use this map to ensure we don't send the same node
// announcement more than one time as one node may have many channel
// anns we'll need to send.
- nodePubsSent := make(map[routing.Vertex]struct{})
+ nodePubsSent := make(map[route.Vertex]struct{})

chanAnns := make([]lnwire.Message, 0, len(channels)*3)
for _, channel := range channels {

@@ -21,6 +21,7 @@ import (
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/multimutex"
"github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/ticker"
)

@@ -131,7 +132,7 @@ type Config struct {
// that the daemon is connected to. If supplied, the exclude parameter
// indicates that the target peer should be excluded from the
// broadcast.
- Broadcast func(skips map[routing.Vertex]struct{},
+ Broadcast func(skips map[route.Vertex]struct{},
msg ...lnwire.Message) error

// NotifyWhenOnline is a function that allows the gossiper to be
@@ -342,7 +343,7 @@ func (d *AuthenticatedGossiper) SynchronizeNode(syncPeer lnpeer.Peer) error {
// We'll use this map to ensure we don't send the same node
// announcement more than one time as one node may have many channel
// anns we'll need to send.
- nodePubsSent := make(map[routing.Vertex]struct{})
+ nodePubsSent := make(map[route.Vertex]struct{})

// As peers are expecting channel announcements before node
// announcements, we first retrieve the initial announcement, as well as
@@ -654,14 +655,14 @@ type msgWithSenders struct {
msg lnwire.Message

// sender is the set of peers that sent us this message.
- senders map[routing.Vertex]struct{}
+ senders map[route.Vertex]struct{}
}

// mergeSyncerMap is used to merge the set of senders of a particular message
// with peers that we have an active GossipSyncer with. We do this to ensure
// that we don't broadcast messages to any peers that we have active gossip
// syncers for.
- func (m *msgWithSenders) mergeSyncerMap(syncers map[routing.Vertex]*GossipSyncer) {
+ func (m *msgWithSenders) mergeSyncerMap(syncers map[route.Vertex]*GossipSyncer) {
for peerPub := range syncers {
m.senders[peerPub] = struct{}{}
}
@@ -682,7 +683,7 @@ type deDupedAnnouncements struct {
channelUpdates map[channelUpdateID]msgWithSenders

// nodeAnnouncements are identified by the Vertex field.
- nodeAnnouncements map[routing.Vertex]msgWithSenders
+ nodeAnnouncements map[route.Vertex]msgWithSenders

sync.Mutex
}
@@ -704,7 +705,7 @@ func (d *deDupedAnnouncements) reset() {
// appropriate key points to the corresponding lnwire.Message.
d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders)
d.channelUpdates = make(map[channelUpdateID]msgWithSenders)
- d.nodeAnnouncements = make(map[routing.Vertex]msgWithSenders)
+ d.nodeAnnouncements = make(map[route.Vertex]msgWithSenders)
}

// addMsg adds a new message to the current batch. If the message is already
@@ -722,13 +723,13 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
// Channel announcements are identified by the short channel id field.
case *lnwire.ChannelAnnouncement:
deDupKey := msg.ShortChannelID
- sender := routing.NewVertex(message.source)
+ sender := route.NewVertex(message.source)

mws, ok := d.channelAnnouncements[deDupKey]
if !ok {
mws = msgWithSenders{
msg: msg,
- senders: make(map[routing.Vertex]struct{}),
+ senders: make(map[route.Vertex]struct{}),
}
mws.senders[sender] = struct{}{}

@@ -744,7 +745,7 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
// Channel updates are identified by the (short channel id,
// channelflags) tuple.
case *lnwire.ChannelUpdate:
- sender := routing.NewVertex(message.source)
+ sender := route.NewVertex(message.source)
deDupKey := channelUpdateID{
msg.ShortChannelID,
msg.ChannelFlags,
@@ -770,7 +771,7 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
if oldTimestamp < msg.Timestamp {
mws = msgWithSenders{
msg: msg,
- senders: make(map[routing.Vertex]struct{}),
+ senders: make(map[route.Vertex]struct{}),
}

// We'll mark the sender of the message in the
@@ -793,8 +794,8 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
// Node announcements are identified by the Vertex field. Use the
// NodeID to create the corresponding Vertex.
case *lnwire.NodeAnnouncement:
- sender := routing.NewVertex(message.source)
- deDupKey := routing.Vertex(msg.NodeID)
+ sender := route.NewVertex(message.source)
+ deDupKey := route.Vertex(msg.NodeID)

// We do the same for node announcements as we did for channel
// updates, as they also carry a timestamp.
@@ -813,7 +814,7 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
if oldTimestamp < msg.Timestamp {
mws = msgWithSenders{
msg: msg,
- senders: make(map[routing.Vertex]struct{}),
+ senders: make(map[route.Vertex]struct{}),
}

mws.senders[sender] = struct{}{}
@@ -1137,7 +1138,7 @@ func (d *AuthenticatedGossiper) InitSyncState(syncPeer lnpeer.Peer) {
// PruneSyncState is called by outside sub-systems once a peer that we were
// previously connected to has been disconnected. In this case we can stop the
// existing GossipSyncer assigned to the peer and free up resources.
- func (d *AuthenticatedGossiper) PruneSyncState(peer routing.Vertex) {
+ func (d *AuthenticatedGossiper) PruneSyncState(peer route.Vertex) {
d.syncMgr.PruneSyncState(peer)
}

@@ -27,6 +27,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/ticker"
)

@@ -272,7 +273,7 @@ func (r *mockGraphSource) GetChannelByID(chanID lnwire.ShortChannelID) (
}

func (r *mockGraphSource) FetchLightningNode(
- nodePub routing.Vertex) (*channeldb.LightningNode, error) {
+ nodePub route.Vertex) (*channeldb.LightningNode, error) {

for _, node := range r.nodes {
if bytes.Equal(nodePub[:], node.PubKeyBytes[:]) {
@@ -285,7 +286,7 @@ func (r *mockGraphSource) FetchLightningNode(

// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
- func (r *mockGraphSource) IsStaleNode(nodePub routing.Vertex, timestamp time.Time) bool {
+ func (r *mockGraphSource) IsStaleNode(nodePub route.Vertex, timestamp time.Time) bool {
r.mu.Lock()
defer r.mu.Unlock()

@@ -312,7 +313,7 @@ func (r *mockGraphSource) IsStaleNode(nodePub routing.Vertex, timestamp time.Tim

// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
- func (r *mockGraphSource) IsPublicNode(node routing.Vertex) (bool, error) {
+ func (r *mockGraphSource) IsPublicNode(node route.Vertex) (bool, error) {
for _, info := range r.infos {
if !bytes.Equal(node[:], info.NodeKey1Bytes[:]) &&
!bytes.Equal(node[:], info.NodeKey2Bytes[:]) {
@@ -721,7 +722,7 @@ func createTestCtx(startHeight uint32) (*testCtx, func(), error) {
broadcastedMessage := make(chan msgWithSenders, 10)
gossiper := New(Config{
Notifier: notifier,
- Broadcast: func(senders map[routing.Vertex]struct{},
+ Broadcast: func(senders map[route.Vertex]struct{},
msgs ...lnwire.Message) error {

for _, msg := range msgs {
@@ -785,7 +786,7 @@ func TestProcessAnnouncement(t *testing.T) {
defer cleanup()

assertSenderExistence := func(sender *btcec.PublicKey, msg msgWithSenders) {
- if _, ok := msg.senders[routing.NewVertex(sender)]; !ok {
+ if _, ok := msg.senders[route.NewVertex(sender)]; !ok {
t.Fatalf("sender=%x not present in %v",
sender.SerializeCompressed(), spew.Sdump(msg))
}
@@ -1987,7 +1988,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
if len(announcements.nodeAnnouncements) != 2 {
t.Fatal("node announcement not replaced in batch")
}
- nodeID := routing.NewVertex(nodeKeyPriv2.PubKey())
+ nodeID := route.NewVertex(nodeKeyPriv2.PubKey())
stored, ok := announcements.nodeAnnouncements[nodeID]
if !ok {
t.Fatalf("node announcement not found in batch")

@@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightningnetwork/lnd/lnpeer"
"github.com/lightningnetwork/lnd/lnwire"
- "github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/ticker"
)

@@ -45,7 +45,7 @@ type newSyncer struct {
// that a peer has disconnected and its GossipSyncer should be removed.
type staleSyncer struct {
// peer is the peer that has disconnected.
- peer routing.Vertex
+ peer route.Vertex

// doneChan serves as a signal to the caller that the SyncManager's
// internal state correctly reflects the stale active syncer. This is
@@ -118,11 +118,11 @@ type SyncManager struct {
// activeSyncers is the set of all syncers for which we are currently
// receiving graph updates from. The number of possible active syncers
// is bounded by NumActiveSyncers.
- activeSyncers map[routing.Vertex]*GossipSyncer
+ activeSyncers map[route.Vertex]*GossipSyncer

// inactiveSyncers is the set of all syncers for which we are not
// currently receiving new graph updates from.
- inactiveSyncers map[routing.Vertex]*GossipSyncer
+ inactiveSyncers map[route.Vertex]*GossipSyncer

wg sync.WaitGroup
quit chan struct{}
@@ -135,9 +135,9 @@ func newSyncManager(cfg *SyncManagerCfg) *SyncManager {
newSyncers: make(chan *newSyncer),
staleSyncers: make(chan *staleSyncer),
activeSyncers: make(
- map[routing.Vertex]*GossipSyncer, cfg.NumActiveSyncers,
+ map[route.Vertex]*GossipSyncer, cfg.NumActiveSyncers,
),
- inactiveSyncers: make(map[routing.Vertex]*GossipSyncer),
+ inactiveSyncers: make(map[route.Vertex]*GossipSyncer),
quit: make(chan struct{}),
}
}
@@ -372,7 +372,7 @@ func (m *SyncManager) syncerHandler() {

// createGossipSyncer creates the GossipSyncer for a newly connected peer.
func (m *SyncManager) createGossipSyncer(peer lnpeer.Peer) *GossipSyncer {
- nodeID := routing.Vertex(peer.PubKey())
+ nodeID := route.Vertex(peer.PubKey())
log.Infof("Creating new GossipSyncer for peer=%x", nodeID[:])

encoding := lnwire.EncodingSortedPlain
@@ -399,7 +399,7 @@ func (m *SyncManager) createGossipSyncer(peer lnpeer.Peer) *GossipSyncer {
// removeGossipSyncer removes all internal references to the disconnected peer's
// GossipSyncer and stops it. In the event of an active GossipSyncer being
// disconnected, a passive GossipSyncer, if any, will take its place.
- func (m *SyncManager) removeGossipSyncer(peer routing.Vertex) {
+ func (m *SyncManager) removeGossipSyncer(peer route.Vertex) {
m.syncersMu.Lock()
defer m.syncersMu.Unlock()

@@ -527,7 +527,7 @@ func (m *SyncManager) forceHistoricalSync() *GossipSyncer {
//
// NOTE: It's possible for a nil value to be returned if there are no eligible
// candidate syncers.
- func chooseRandomSyncer(syncers map[routing.Vertex]*GossipSyncer,
+ func chooseRandomSyncer(syncers map[route.Vertex]*GossipSyncer,
action func(*GossipSyncer) error) *GossipSyncer {

for _, s := range syncers {
@@ -583,7 +583,7 @@ func (m *SyncManager) InitSyncState(peer lnpeer.Peer) error {
// PruneSyncState is called by outside sub-systems once a peer that we were
// previously connected to has been disconnected. In this case we can stop the
// existing GossipSyncer assigned to the peer and free up resources.
- func (m *SyncManager) PruneSyncState(peer routing.Vertex) {
+ func (m *SyncManager) PruneSyncState(peer route.Vertex) {
done := make(chan struct{})

// We avoid returning an error when the SyncManager is stopped since the
@@ -605,7 +605,7 @@ func (m *SyncManager) PruneSyncState(peer routing.Vertex) {

// GossipSyncer returns the associated gossip syncer of a peer. The boolean
// returned signals whether there exists a gossip syncer for the peer.
- func (m *SyncManager) GossipSyncer(peer routing.Vertex) (*GossipSyncer, bool) {
+ func (m *SyncManager) GossipSyncer(peer route.Vertex) (*GossipSyncer, bool) {
m.syncersMu.Lock()
defer m.syncersMu.Unlock()
return m.gossipSyncer(peer)
@@ -613,7 +613,7 @@ func (m *SyncManager) GossipSyncer(peer routing.Vertex) (*GossipSyncer, bool) {

// gossipSyncer returns the associated gossip syncer of a peer. The boolean
// returned signals whether there exists a gossip syncer for the peer.
- func (m *SyncManager) gossipSyncer(peer routing.Vertex) (*GossipSyncer, bool) {
+ func (m *SyncManager) gossipSyncer(peer route.Vertex) (*GossipSyncer, bool) {
syncer, ok := m.inactiveSyncers[peer]
if ok {
return syncer, true
@@ -626,16 +626,16 @@ func (m *SyncManager) gossipSyncer(peer routing.Vertex) (*GossipSyncer, bool) {
}

// GossipSyncers returns all of the currently initialized gossip syncers.
- func (m *SyncManager) GossipSyncers() map[routing.Vertex]*GossipSyncer {
+ func (m *SyncManager) GossipSyncers() map[route.Vertex]*GossipSyncer {
m.syncersMu.Lock()
defer m.syncersMu.Unlock()
return m.gossipSyncers()
}

// gossipSyncers returns all of the currently initialized gossip syncers.
- func (m *SyncManager) gossipSyncers() map[routing.Vertex]*GossipSyncer {
+ func (m *SyncManager) gossipSyncers() map[route.Vertex]*GossipSyncer {
numSyncers := len(m.inactiveSyncers) + len(m.activeSyncers)
- syncers := make(map[routing.Vertex]*GossipSyncer, numSyncers)
+ syncers := make(map[route.Vertex]*GossipSyncer, numSyncers)

for _, syncer := range m.inactiveSyncers {
syncers[syncer.cfg.peerPub] = syncer

@@ -9,6 +9,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"
context "golang.org/x/net/context"
)

@@ -19,7 +20,7 @@ type RouterBackend struct {
MaxPaymentMSat lnwire.MilliSatoshi

// SelfNode is the vertex of the node sending the payment.
- SelfNode routing.Vertex
+ SelfNode route.Vertex

// FetchChannelCapacity is a closure that we'll use the fetch the total
// capacity of a channel to populate in responses.
@@ -27,10 +28,10 @@ type RouterBackend struct {

// FindRoutes is a closure that abstracts away how we locate/query for
// routes.
- FindRoutes func(source, target routing.Vertex,
+ FindRoutes func(source, target route.Vertex,
amt lnwire.MilliSatoshi, restrictions *routing.RestrictParams,
numPaths uint32, finalExpiry ...uint16) (
- []*routing.Route, error)
+ []*route.Route, error)
}

// QueryRoutes attempts to query the daemons' Channel Router for a possible
@@ -45,18 +46,18 @@ type RouterBackend struct {
func (r *RouterBackend) QueryRoutes(ctx context.Context,
in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {

- parsePubKey := func(key string) (routing.Vertex, error) {
+ parsePubKey := func(key string) (route.Vertex, error) {
pubKeyBytes, err := hex.DecodeString(key)
if err != nil {
- return routing.Vertex{}, err
+ return route.Vertex{}, err
}

if len(pubKeyBytes) != 33 {
- return routing.Vertex{},
+ return route.Vertex{},
errors.New("invalid key length")
}

- var v routing.Vertex
+ var v route.Vertex
copy(v[:], pubKeyBytes)

return v, nil
@@ -69,7 +70,7 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context,
return nil, err
}

- var sourcePubKey routing.Vertex
+ var sourcePubKey route.Vertex
if in.SourcePubKey != "" {
var err error
sourcePubKey, err = parsePubKey(in.SourcePubKey)
@@ -94,12 +95,12 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context,
// Unmarshall restrictions from request.
feeLimit := calculateFeeLimit(in.FeeLimit, amtMSat)

- ignoredNodes := make(map[routing.Vertex]struct{})
+ ignoredNodes := make(map[route.Vertex]struct{})
for _, ignorePubKey := range in.IgnoredNodes {
if len(ignorePubKey) != 33 {
return nil, fmt.Errorf("invalid ignore node pubkey")
}
- var ignoreVertex routing.Vertex
+ var ignoreVertex route.Vertex
copy(ignoreVertex[:], ignorePubKey)
ignoredNodes[ignoreVertex] = struct{}{}
}
@@ -131,7 +132,7 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context,
// can carry `in.Amt` satoshis _including_ the total fee required on
// the route.
var (
- routes []*routing.Route
+ routes []*route.Route
findErr error
)

@@ -195,7 +196,7 @@ func calculateFeeLimit(feeLimit *lnrpc.FeeLimit,
}

// MarshallRoute marshalls an internal route to an rpc route struct.
- func (r *RouterBackend) MarshallRoute(route *routing.Route) *lnrpc.Route {
+ func (r *RouterBackend) MarshallRoute(route *route.Route) *lnrpc.Route {
resp := &lnrpc.Route{
TotalTimeLock: route.TotalTimeLock,
TotalFees: int64(route.TotalFees.ToSatoshis()),

@@ -9,6 +9,7 @@ import (
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"

"github.com/lightningnetwork/lnd/lnrpc"
)
@@ -19,7 +20,7 @@ const (
)

var (
- sourceKey = routing.Vertex{1, 2, 3}
+ sourceKey = route.Vertex{1, 2, 3}
)

// TestQueryRoutes asserts that query routes rpc parameters are properly parsed
@@ -30,7 +31,7 @@ func TestQueryRoutes(t *testing.T) {
t.Fatal(err)
}

- var ignoreNodeVertex routing.Vertex
+ var ignoreNodeVertex route.Vertex
copy(ignoreNodeVertex[:], ignoreNodeBytes)

destNodeBytes, err := hex.DecodeString(destKey)
@@ -55,12 +56,12 @@ func TestQueryRoutes(t *testing.T) {
}},
}

- route := &routing.Route{}
+ rt := &route.Route{}

- findRoutes := func(source, target routing.Vertex,
+ findRoutes := func(source, target route.Vertex,
amt lnwire.MilliSatoshi, restrictions *routing.RestrictParams,
numPaths uint32, finalExpiry ...uint16) (
- []*routing.Route, error) {
+ []*route.Route, error) {

if int64(amt) != request.Amt*1000 {
t.Fatal("unexpected amount")
@@ -100,15 +101,15 @@ func TestQueryRoutes(t *testing.T) {
t.Fatal("unexpected ignored node")
}

- return []*routing.Route{
- route,
+ return []*route.Route{
+ rt,
}, nil
}

backend := &RouterBackend{
MaxPaymentMSat: lnwire.NewMSatFromSatoshis(1000000),
FindRoutes: findRoutes,
- SelfNode: routing.Vertex{1, 2, 3},
+ SelfNode: route.Vertex{1, 2, 3},
FetchChannelCapacity: func(chanID uint64) (
btcutil.Amount, error) {

@@ -15,6 +15,7 @@ import (
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing"
+ "github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/zpay32"
"google.golang.org/grpc"
"gopkg.in/macaroon-bakery.v2/bakery"
@@ -190,7 +191,7 @@ func (s *Server) SendPayment(ctx context.Context,
return nil, fmt.Errorf("zero value invoices are not supported")
}

- var destination routing.Vertex
+ var destination route.Vertex
copy(destination[:], payReq.Destination.SerializeCompressed())

// Now that all the information we need has been parsed, we'll map this
@@ -232,7 +233,7 @@ func (s *Server) EstimateRouteFee(ctx context.Context,
if len(req.Dest) != 33 {
return nil, errors.New("invalid length destination key")
}
- var destNode routing.Vertex
+ var destNode route.Vertex
copy(destNode[:], req.Dest)

// Next, we'll convert the amount in satoshis to mSAT, which are the

@@ -8,6 +8,7 @@ import (
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/zpay32"
)

@@ -53,7 +54,7 @@ type missionControl struct {
// the time that it was added to the prune view. Vertexes are added to
// this map if a caller reports to missionControl a failure localized
// to that particular vertex.
- failedVertexes map[Vertex]time.Time
+ failedVertexes map[route.Vertex]time.Time

graph *channeldb.ChannelGraph

@@ -77,7 +78,7 @@ func newMissionControl(g *channeldb.ChannelGraph, selfNode *channeldb.LightningN

return &missionControl{
failedEdges: make(map[EdgeLocator]time.Time),
- failedVertexes: make(map[Vertex]time.Time),
+ failedVertexes: make(map[route.Vertex]time.Time),
selfNode: selfNode,
queryBandwidth: qb,
graph: g,
@@ -92,7 +93,7 @@ func newMissionControl(g *channeldb.ChannelGraph, selfNode *channeldb.LightningN
type graphPruneView struct {
edges map[EdgeLocator]struct{}

- vertexes map[Vertex]struct{}
+ vertexes map[route.Vertex]struct{}
}

// GraphPruneView returns a new graphPruneView instance which is to be
@@ -110,7 +111,7 @@ func (m *missionControl) GraphPruneView() graphPruneView {
// For each of the vertexes that have been added to the prune view, if
// it is now "stale", then we'll ignore it and avoid adding it to the
// view we'll return.
- vertexes := make(map[Vertex]struct{})
+ vertexes := make(map[route.Vertex]struct{})
for vertex, pruneTime := range m.failedVertexes {
if now.Sub(pruneTime) >= vertexDecay {
log.Tracef("Pruning decayed failure report for vertex %v "+
@@ -154,11 +155,11 @@ func (m *missionControl) GraphPruneView() graphPruneView {
// in order to populate additional edges to explore when finding a path to the
// payment's destination.
func (m *missionControl) NewPaymentSession(routeHints [][]zpay32.HopHint,
- target Vertex) (*paymentSession, error) {
+ target route.Vertex) (*paymentSession, error) {

viewSnapshot := m.GraphPruneView()

- edges := make(map[Vertex][]*channeldb.ChannelEdgePolicy)
+ edges := make(map[route.Vertex][]*channeldb.ChannelEdgePolicy)

// Traverse through all of the available hop hints and include them in
// our edges map, indexed by the public key of the channel's starting
@@ -200,7 +201,7 @@ func (m *missionControl) NewPaymentSession(routeHints [][]zpay32.HopHint,
TimeLockDelta: hopHint.CLTVExpiryDelta,
}

- v := NewVertex(hopHint.NodeID)
+ v := route.NewVertex(hopHint.NodeID)
edges[v] = append(edges[v], edge)
}
}
@@ -234,7 +235,7 @@ func (m *missionControl) NewPaymentSession(routeHints [][]zpay32.HopHint,
// skip all path finding, and will instead utilize a set of pre-built routes.
// This constructor allows callers to specify their own routes which can be
// used for things like channel rebalancing, and swaps.
- func (m *missionControl) NewPaymentSessionFromRoutes(routes []*Route) *paymentSession {
+ func (m *missionControl) NewPaymentSessionFromRoutes(routes []*route.Route) *paymentSession {
return &paymentSession{
pruneViewSnapshot: m.GraphPruneView(),
haveRoutes: true,
@@ -285,6 +286,6 @@ func generateBandwidthHints(sourceNode *channeldb.LightningNode,
func (m *missionControl) ResetHistory() {
m.Lock()
m.failedEdges = make(map[EdgeLocator]time.Time)
- m.failedVertexes = make(map[Vertex]time.Time)
+ m.failedVertexes = make(map[route.Vertex]time.Time)
m.Unlock()
}

@@ -20,6 +20,7 @@ import (
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/chainview"
+ "github.com/lightningnetwork/lnd/routing/route"
)

var (
@@ -460,9 +461,9 @@ func TestEdgeUpdateNotification(t *testing.T) {

// Create lookup map for notifications we are intending to receive. Entries
// are removed from the map when the anticipated notification is received.
- var waitingFor = map[Vertex]int{
- Vertex(node1.PubKeyBytes): 1,
- Vertex(node2.PubKeyBytes): 2,
+ var waitingFor = map[route.Vertex]int{
+ route.Vertex(node1.PubKeyBytes): 1,
+ route.Vertex(node2.PubKeyBytes): 2,
}

node1Pub, err := node1.PubKey()
@@ -486,7 +487,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
}

edgeUpdate := ntfn.ChannelEdgeUpdates[0]
- nodeVertex := NewVertex(edgeUpdate.AdvertisingNode)
+ nodeVertex := route.NewVertex(edgeUpdate.AdvertisingNode)

if idx, ok := waitingFor[nodeVertex]; ok {
switch idx {
@@ -630,9 +631,9 @@ func TestNodeUpdateNotification(t *testing.T) {

// Create lookup map for notifications we are intending to receive. Entries
// are removed from the map when the anticipated notification is received.
- var waitingFor = map[Vertex]int{
- Vertex(node1.PubKeyBytes): 1,
- Vertex(node2.PubKeyBytes): 2,
+ var waitingFor = map[route.Vertex]int{
+ route.Vertex(node1.PubKeyBytes): 1,
+ route.Vertex(node2.PubKeyBytes): 2,
}

// Exactly two notifications should be sent, each corresponding to the
@@ -649,7 +650,7 @@ func TestNodeUpdateNotification(t *testing.T) {
}

nodeUpdate := ntfn.NodeUpdates[0]
- nodeVertex := NewVertex(nodeUpdate.IdentityKey)
+ nodeVertex := route.NewVertex(nodeUpdate.IdentityKey)
if idx, ok := waitingFor[nodeVertex]; ok {
switch idx {
case 1:

@@ -1,18 +1,14 @@
package routing

import (
- "encoding/binary"
- "fmt"
+ "container/heap"
"math"

- "container/heap"

- "github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"

- sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/routing/route"
)

const (
@@ -41,34 +37,9 @@ const (

// pathFinder defines the interface of a path finding algorithm.
type pathFinder = func(g *graphParams, r *RestrictParams,
- source, target Vertex, amt lnwire.MilliSatoshi) (
+ source, target route.Vertex, amt lnwire.MilliSatoshi) (
[]*channeldb.ChannelEdgePolicy, error)

- // Hop represents an intermediate or final node of the route. This naming
- // is in line with the definition given in BOLT #4: Onion Routing Protocol.
- // The struct houses the channel along which this hop can be reached and
- // the values necessary to create the HTLC that needs to be sent to the
- // next hop. It is also used to encode the per-hop payload included within
- // the Sphinx packet.
- type Hop struct {
- // PubKeyBytes is the raw bytes of the public key of the target node.
- PubKeyBytes Vertex
-
- // ChannelID is the unique channel ID for the channel. The first 3
- // bytes are the block height, the next 3 the index within the block,
- // and the last 2 bytes are the output index for the channel.
- ChannelID uint64
-
- // OutgoingTimeLock is the timelock value that should be used when
- // crafting the _outgoing_ HTLC from this hop.
- OutgoingTimeLock uint32
-
- // AmtToForward is the amount that this hop will forward to the next
- // hop. This value is less than the value that the incoming HTLC
- // carries as a fee will be subtracted by the hop.
- AmtToForward lnwire.MilliSatoshi
- }
-
// edgePolicyWithSource is a helper struct to keep track of the source node
// of a channel edge. ChannelEdgePolicy only contains to destination node
// of the edge.
@@ -102,89 +73,6 @@ func isSamePath(path1, path2 []*channeldb.ChannelEdgePolicy) bool {
return true
}

- // Route represents a path through the channel graph which runs over one or
- // more channels in succession. This struct carries all the information
- // required to craft the Sphinx onion packet, and send the payment along the
- // first hop in the path. A route is only selected as valid if all the channels
- // have sufficient capacity to carry the initial payment amount after fees are
- // accounted for.
- type Route struct {
- // TotalTimeLock is the cumulative (final) time lock across the entire
- // route. This is the CLTV value that should be extended to the first
- // hop in the route. All other hops will decrement the time-lock as
- // advertised, leaving enough time for all hops to wait for or present
- // the payment preimage to complete the payment.
- TotalTimeLock uint32
-
- // TotalFees is the sum of the fees paid at each hop within the final
- // route. In the case of a one-hop payment, this value will be zero as
- // we don't need to pay a fee to ourself.
- TotalFees lnwire.MilliSatoshi
-
- // TotalAmount is the total amount of funds required to complete a
- // payment over this route. This value includes the cumulative fees at
- // each hop. As a result, the HTLC extended to the first-hop in the
- // route will need to have at least this many satoshis, otherwise the
- // route will fail at an intermediate node due to an insufficient
- // amount of fees.
- TotalAmount lnwire.MilliSatoshi
-
- // SourcePubKey is the pubkey of the node where this route originates
- // from.
- SourcePubKey Vertex
-
- // Hops contains details concerning the specific forwarding details at
- // each hop.
- Hops []*Hop
- }
-
- // HopFee returns the fee charged by the route hop indicated by hopIndex.
- func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi {
- var incomingAmt lnwire.MilliSatoshi
- if hopIndex == 0 {
- incomingAmt = r.TotalAmount
- } else {
- incomingAmt = r.Hops[hopIndex-1].AmtToForward
- }
-
- // Fee is calculated as difference between incoming and outgoing amount.
- return incomingAmt - r.Hops[hopIndex].AmtToForward
- }
-
- // ToHopPayloads converts a complete route into the series of per-hop payloads
- // that is to be encoded within each HTLC using an opaque Sphinx packet.
- func (r *Route) ToHopPayloads() []sphinx.HopData {
- hopPayloads := make([]sphinx.HopData, len(r.Hops))
-
- // For each hop encoded within the route, we'll convert the hop struct
- // to the matching per-hop payload struct as used by the sphinx
- // package.
- for i, hop := range r.Hops {
- hopPayloads[i] = sphinx.HopData{
- // TODO(roasbeef): properly set realm, make sphinx type
- // an enum actually?
- Realm: 0,
- ForwardAmount: uint64(hop.AmtToForward),
- OutgoingCltv: hop.OutgoingTimeLock,
- }
-
- // As a base case, the next hop is set to all zeroes in order
- // to indicate that the "last hop" as no further hops after it.
- nextHop := uint64(0)
-
- // If we aren't on the last hop, then we set the "next address"
- // field to be the channel that directly follows it.
- if i != len(r.Hops)-1 {
- nextHop = r.Hops[i+1].ChannelID
- }
-
- binary.BigEndian.PutUint64(hopPayloads[i].NextAddress[:],
- nextHop)
- }
-
- return hopPayloads
- }
-
// newRoute returns a fully valid route between the source and target that's
// capable of supporting a payment of `amtToSend` after fees are fully
// computed. If the route is too long, or the selected path cannot support the
@@ -192,12 +80,12 @@ func (r *Route) ToHopPayloads() []sphinx.HopData {
//
// NOTE: The passed slice of ChannelHops MUST be sorted in forward order: from
// the source to the target node of the path finding attempt.
- func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex Vertex,
+ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex route.Vertex,
pathEdges []*channeldb.ChannelEdgePolicy, currentHeight uint32,
- finalCLTVDelta uint16) (*Route, error) {
+ finalCLTVDelta uint16) (*route.Route, error) {

var (
- hops []*Hop
+ hops []*route.Hop

// totalTimeLock will accumulate the cumulative time lock
// across the entire route. This value represents how long the
@@ -270,13 +158,13 @@ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex Vertex,
// Since we're traversing the path backwards atm, we prepend
// each new hop such that, the final slice of hops will be in
// the forwards order.
- currentHop := &Hop{
- PubKeyBytes: Vertex(edge.Node.PubKeyBytes),
+ currentHop := &route.Hop{
+ PubKeyBytes: edge.Node.PubKeyBytes,
ChannelID: edge.ChannelID,
AmtToForward: amtToForward,
OutgoingTimeLock: outgoingTimeLock,
}
- hops = append([]*Hop{currentHop}, hops...)
+ hops = append([]*route.Hop{currentHop}, hops...)

// Finally, we update the amount that needs to flow into the
// *next* hop, which is the amount this hop needs to forward,
@@ -285,8 +173,8 @@ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex Vertex,
}

// With the base routing data expressed as hops, build the full route
- newRoute, err := NewRouteFromHops(
- nextIncomingAmount, totalTimeLock, sourceVertex, hops,
+ newRoute, err := route.NewRouteFromHops(
+ nextIncomingAmount, totalTimeLock, route.Vertex(sourceVertex), hops,
)
if err != nil {
return nil, err
@@ -295,49 +183,6 @@ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex Vertex,
return newRoute, nil
}

- // NewRouteFromHops creates a new Route structure from the minimally required
- // information to perform the payment. It infers fee amounts and populates the
- // node, chan and prev/next hop maps.
- func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32,
- sourceVertex Vertex, hops []*Hop) (*Route, error) {
-
- if len(hops) == 0 {
- return nil, ErrNoRouteHopsProvided
- }
-
- // First, we'll create a route struct and populate it with the fields
- // for which the values are provided as arguments of this function.
- // TotalFees is determined based on the difference between the amount
- // that is send from the source and the final amount that is received
- // by the destination.
- route := &Route{
- SourcePubKey: sourceVertex,
- Hops: hops,
- TotalTimeLock: timeLock,
- TotalAmount: amtToSend,
- TotalFees: amtToSend - hops[len(hops)-1].AmtToForward,
- }
-
- return route, nil
- }
-
- // Vertex is a simple alias for the serialization of a compressed Bitcoin
- // public key.
- type Vertex [33]byte
-
- // NewVertex returns a new Vertex given a public key.
- func NewVertex(pub *btcec.PublicKey) Vertex {
- var v Vertex
- copy(v[:], pub.SerializeCompressed())
- return v
- }
-
- // String returns a human readable version of the Vertex which is the
- // hex-encoding of the serialized compressed public key.
- func (v Vertex) String() string {
- return fmt.Sprintf("%x", v[:])
- }
-
// edgeWeight computes the weight of an edge. This value is used when searching
// for the shortest path within the channel graph between two nodes. Weight is
// is the fee itself plus a time lock penalty added to it. This benefits
@@ -368,7 +213,7 @@ type graphParams struct {
// additionalEdges is an optional set of edges that should be
// considered during path finding, that is not already found in the
// channel graph.
- additionalEdges map[Vertex][]*channeldb.ChannelEdgePolicy
+ additionalEdges map[route.Vertex][]*channeldb.ChannelEdgePolicy

// bandwidthHints is an optional map from channels to bandwidths that
// can be populated if the caller has a better estimate of the current
@@ -385,7 +230,7 @@ type graphParams struct {
type RestrictParams struct {
// IgnoredNodes is an optional set of nodes that should be ignored if
// encountered during path finding.
- IgnoredNodes map[Vertex]struct{}
+ IgnoredNodes map[route.Vertex]struct{}

// IgnoredEdges is an optional set of edges that should be ignored if
// encountered during path finding.
@@ -416,7 +261,7 @@ type RestrictParams struct {
// destination node back to source. This is to properly accumulate fees
// that need to be paid along the path and accurately check the amount
// to forward at every node against the available bandwidth.
- func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
+ func findPath(g *graphParams, r *RestrictParams, source, target route.Vertex,
amt lnwire.MilliSatoshi) ([]*channeldb.ChannelEdgePolicy, error) {

var err error
@@ -438,12 +283,12 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
// for the node set with a distance of "infinity". graph.ForEachNode
// also returns the source node, so there is no need to add the source
// node explicitly.
- distance := make(map[Vertex]nodeWithDist)
+ distance := make(map[route.Vertex]nodeWithDist)
if err := g.graph.ForEachNode(tx, func(_ *bbolt.Tx,
node *channeldb.LightningNode) error {
// TODO(roasbeef): with larger graph can just use disk seeks
// with a visited map
- distance[Vertex(node.PubKeyBytes)] = nodeWithDist{
+ distance[route.Vertex(node.PubKeyBytes)] = nodeWithDist{
dist: infinity,
node: node,
}
@@ -452,7 +297,7 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
return nil, err
}

- additionalEdgesWithSrc := make(map[Vertex][]*edgePolicyWithSource)
+ additionalEdgesWithSrc := make(map[route.Vertex][]*edgePolicyWithSource)
for vertex, outgoingEdgePolicies := range g.additionalEdges {
// We'll also include all the nodes found within the additional
// edges that are not known to us yet in the distance map.
@@ -495,7 +340,7 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
// We'll use this map as a series of "next" hop pointers. So to get
// from `Vertex` to the target node, we'll take the edge that it's
// mapped to within `next`.
- next := make(map[Vertex]*channeldb.ChannelEdgePolicy)
+ next := make(map[route.Vertex]*channeldb.ChannelEdgePolicy)

ignoredEdges := r.IgnoredEdges
if ignoredEdges == nil {
@@ -503,16 +348,16 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
}
ignoredNodes := r.IgnoredNodes
if ignoredNodes == nil {
- ignoredNodes = make(map[Vertex]struct{})
+ ignoredNodes = make(map[route.Vertex]struct{})
}

// processEdge is a helper closure that will be used to make sure edges
// satisfy our specific requirements.
processEdge := func(fromNode *channeldb.LightningNode,
edge *channeldb.ChannelEdgePolicy,
- bandwidth lnwire.MilliSatoshi, toNode Vertex) {
+ bandwidth lnwire.MilliSatoshi, toNode route.Vertex) {

- fromVertex := Vertex(fromNode.PubKeyBytes)
+ fromVertex := route.Vertex(fromNode.PubKeyBytes)

// If this is not a local channel and it is disabled, we will
// skip it.
@@ -674,7 +519,7 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
// Now that we've found the next potential step to take we'll
// examine all the incoming edges (channels) from this node to
// further our graph traversal.
- pivot := Vertex(bestNode.PubKeyBytes)
+ pivot := route.Vertex(bestNode.PubKeyBytes)
err := bestNode.ForEachChannel(tx, func(tx *bbolt.Tx,
edgeInfo *channeldb.ChannelEdgeInfo,
_, inEdge *channeldb.ChannelEdgePolicy) error {
@@ -756,7 +601,7 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
pathEdges = append(pathEdges, nextNode)

// Advance current node.
- currentNode = Vertex(nextNode.Node.PubKeyBytes)
+ currentNode = route.Vertex(nextNode.Node.PubKeyBytes)
}

// The route is invalid if it spans more than 20 hops. The current
@@ -784,7 +629,7 @@ func findPath(g *graphParams, r *RestrictParams, source, target Vertex,
// algorithm, rather than attempting to use an unmodified path finding
// algorithm in a block box manner.
func findPaths(tx *bbolt.Tx, graph *channeldb.ChannelGraph,
- source, target Vertex, amt lnwire.MilliSatoshi,
+ source, target route.Vertex, amt lnwire.MilliSatoshi,
restrictions *RestrictParams, numPaths uint32,
bandwidthHints map[uint64]lnwire.MilliSatoshi) (
[][]*channeldb.ChannelEdgePolicy, error) {
@@ -837,7 +682,7 @@ func findPaths(tx *bbolt.Tx, graph *channeldb.ChannelGraph,
// These are required to ensure the paths are unique
// and loopless.
ignoredEdges := make(map[EdgeLocator]struct{})
- ignoredVertexes := make(map[Vertex]struct{})
+ ignoredVertexes := make(map[route.Vertex]struct{})

for e := range restrictions.IgnoredEdges {
ignoredEdges[e] = struct{}{}
@@ -878,7 +723,7 @@ func findPaths(tx *bbolt.Tx, graph *channeldb.ChannelGraph,
continue
}

- ignoredVertexes[Vertex(node)] = struct{}{}
+ ignoredVertexes[route.Vertex(node)] = struct{}{}
}

// With the edges that are part of our root path, and

@@ -23,6 +23,7 @@ import (
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/zpay32"
)

@@ -164,7 +165,7 @@ func parseTestGraph(path string) (*testGraphInstance, error) {
return nil, err
}

- aliasMap := make(map[string]Vertex)
+ aliasMap := make(map[string]route.Vertex)
var source *channeldb.LightningNode

// First we insert all the nodes within the graph as vertexes.
@@ -366,7 +367,7 @@ type testGraphInstance struct {
// aliasMap is a map from a node's alias to its public key. This type is
// provided in order to allow easily look up from the human memorable alias
// to an exact node's public key.
- aliasMap map[string]Vertex
+ aliasMap map[string]route.Vertex

// privKeyMap maps a node alias to its private key. This is used to be
// able to mock a remote node's signing behaviour.
@@ -395,7 +396,7 @@ func createTestGraphFromChannels(testChannels []*testChannel) (*testGraphInstanc
return nil, err
}

- aliasMap := make(map[string]Vertex)
+ aliasMap := make(map[string]route.Vertex)
privKeyMap := make(map[string]*btcec.PrivateKey)

nodeIndex := byte(0)
@@ -611,7 +612,7 @@ func TestFindLowestFeePath(t *testing.T) {
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
- sourceVertex := Vertex(sourceNode.PubKeyBytes)
+ sourceVertex := route.Vertex(sourceNode.PubKeyBytes)

const (
startingHeight = 100
@@ -648,8 +649,8 @@ func TestFindLowestFeePath(t *testing.T) {
}
}

- func getAliasFromPubKey(pubKey Vertex,
- aliases map[string]Vertex) string {
+ func getAliasFromPubKey(pubKey route.Vertex,
+ aliases map[string]route.Vertex) string {

for alias, key := range aliases {
if key == pubKey {
@@ -751,7 +752,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
- sourceVertex := Vertex(sourceNode.PubKeyBytes)
+ sourceVertex := route.Vertex(sourceNode.PubKeyBytes)

const (
startingHeight = 100
@@ -920,7 +921,7 @@ func TestPathFindingWithAdditionalEdges(t *testing.T) {
TimeLockDelta: 9,
}

- additionalEdges := map[Vertex][]*channeldb.ChannelEdgePolicy{
+ additionalEdges := map[route.Vertex][]*channeldb.ChannelEdgePolicy{
graph.aliasMap["songoku"]: {songokuToDoge},
}

@@ -1006,7 +1007,7 @@ func TestKShortestPathFinding(t *testing.T) {
func TestNewRoute(t *testing.T) {

var sourceKey [33]byte
- sourceVertex := Vertex(sourceKey)
+ sourceVertex := route.Vertex(sourceKey)

const (
startingHeight = 100
@@ -1148,7 +1149,7 @@ func TestNewRoute(t *testing.T) {
}}

for _, testCase := range testCases {
- assertRoute := func(t *testing.T, route *Route) {
+ assertRoute := func(t *testing.T, route *route.Route) {
if route.TotalAmount != testCase.expectedTotalAmount {
t.Errorf("Expected total amount is be %v"+
", but got %v instead",
@@ -1294,7 +1295,7 @@ func TestPathNotAvailable(t *testing.T) {
if err != nil {
t.Fatalf("unable to parse bytes: %v", err)
}
- var unknownNode Vertex
+ var unknownNode route.Vertex
copy(unknownNode[:], unknownNodeBytes)

_, err = findPath(
@@ -1923,7 +1924,7 @@ func TestPathFindSpecExample(t *testing.T) {
}
}

- func assertExpectedPath(t *testing.T, aliasMap map[string]Vertex,
+ func assertExpectedPath(t *testing.T, aliasMap map[string]route.Vertex,
path []*channeldb.ChannelEdgePolicy, nodeAliases ...string) {

if len(path) != len(nodeAliases) {
@@ -1943,9 +1944,9 @@ func assertExpectedPath(t *testing.T, aliasMap map[string]Vertex,
func TestNewRouteFromEmptyHops(t *testing.T) {
t.Parallel()

- var source Vertex
- _, err := NewRouteFromHops(0, 0, source, []*Hop{})
- if err != ErrNoRouteHopsProvided {
+ var source route.Vertex
+ _, err := route.NewRouteFromHops(0, 0, source, []*route.Hop{})
+ if err != route.ErrNoRouteHopsProvided {
t.Fatalf("expected empty hops error: instead got: %v", err)
}
}
@@ -1995,7 +1996,7 @@ func TestRestrictOutgoingChannel(t *testing.T) {
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
- sourceVertex := Vertex(sourceNode.PubKeyBytes)
+ sourceVertex := route.Vertex(sourceNode.PubKeyBytes)

const (
startingHeight = 100
@@ -2089,10 +2090,10 @@ func testCltvLimit(t *testing.T, limit uint32, expectedChannel uint64) {
if err != nil {
t.Fatalf("unable to fetch source node: %v", err)
}
- sourceVertex := Vertex(sourceNode.PubKeyBytes)
+ sourceVertex := route.Vertex(sourceNode.PubKeyBytes)

ignoredEdges := make(map[EdgeLocator]struct{})
- ignoredVertexes := make(map[Vertex]struct{})
+ ignoredVertexes := make(map[route.Vertex]struct{})

paymentAmt := lnwire.NewMSatFromSatoshis(100)
target := testGraphInstance.aliasMap["target"]

@@ -6,6 +6,7 @@ import (

"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/routing/route"
)

// paymentSession is used during an HTLC routings session to prune the local
@@ -19,7 +20,7 @@ import (
type paymentSession struct {
pruneViewSnapshot graphPruneView

- additionalEdges map[Vertex][]*channeldb.ChannelEdgePolicy
+ additionalEdges map[route.Vertex][]*channeldb.ChannelEdgePolicy

bandwidthHints map[uint64]lnwire.MilliSatoshi

@@ -32,7 +33,7 @@ type paymentSession struct {
mc *missionControl

haveRoutes bool
- preBuiltRoutes []*Route
+ preBuiltRoutes []*route.Route

pathFinder pathFinder
}
@@ -42,7 +43,7 @@ type paymentSession struct {
// added is noted, as it'll be pruned from the shared view after a period of
// vertexDecay. However, the vertex will remain pruned for the *local* session.
// This ensures we don't retry this vertex during the payment attempt.
- func (p *paymentSession) ReportVertexFailure(v Vertex) {
+ func (p *paymentSession) ReportVertexFailure(v route.Vertex) {
log.Debugf("Reporting vertex %v failure to Mission Control", v)

// First, we'll add the failed vertex to our local prune view snapshot.
@@ -84,7 +85,7 @@ func (p *paymentSession) ReportEdgeFailure(e *EdgeLocator) {
// pruned. This is to prevent nodes from keeping us busy by continuously sending
// new channel updates.
func (p *paymentSession) ReportEdgePolicyFailure(
- errSource Vertex, failedEdge *EdgeLocator) {
+ errSource route.Vertex, failedEdge *EdgeLocator) {

// Check to see if we've already reported a policy related failure for
// this channel. If so, then we'll prune out the vertex.
@@ -111,7 +112,7 @@ func (p *paymentSession) ReportEdgePolicyFailure(
//
// NOTE: This function is safe for concurrent access.
func (p *paymentSession) RequestRoute(payment *LightningPayment,
- height uint32, finalCltvDelta uint16) (*Route, error) {
+ height uint32, finalCltvDelta uint16) (*route.Route, error) {

switch {
// If we have a set of pre-built routes, then we'll just pop off the
@@ -175,7 +176,7 @@ func (p *paymentSession) RequestRoute(payment *LightningPayment,

// With the next candidate path found, we'll attempt to turn this into
// a route by applying the time-lock and fee requirements.
- sourceVertex := Vertex(p.mc.selfNode.PubKeyBytes)
+ sourceVertex := route.Vertex(p.mc.selfNode.PubKeyBytes)
route, err := newRoute(
payment.Amount, sourceVertex, path, height, finalCltvDelta,
)

@@ -5,6 +5,7 @@ import (

"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
+ "github.com/lightningnetwork/lnd/routing/route"
)

func TestRequestRoute(t *testing.T) {
@@ -13,7 +14,7 @@ func TestRequestRoute(t *testing.T) {
)

findPath := func(g *graphParams, r *RestrictParams,
- source, target Vertex, amt lnwire.MilliSatoshi) (
+ source, target route.Vertex, amt lnwire.MilliSatoshi) (
[]*channeldb.ChannelEdgePolicy, error) {

// We expect find path to receive a cltv limit excluding the

165
routing/route/route.go
Normal file
165
routing/route/route.go
Normal file
@ -0,0 +1,165 @@
package route

import (
"encoding/binary"
"fmt"

"github.com/btcsuite/btcd/btcec"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/lnwire"
)

// ErrNoRouteHopsProvided is returned when a caller attempts to construct a new
// sphinx packet, but provides an empty set of hops for each route.
var ErrNoRouteHopsProvided = fmt.Errorf("empty route hops provided")

// Vertex is a simple alias for the serialization of a compressed Bitcoin
// public key.
type Vertex [33]byte

// NewVertex returns a new Vertex given a public key.
func NewVertex(pub *btcec.PublicKey) Vertex {
var v Vertex
copy(v[:], pub.SerializeCompressed())
return v
}

// String returns a human readable version of the Vertex which is the
// hex-encoding of the serialized compressed public key.
func (v Vertex) String() string {
return fmt.Sprintf("%x", v[:])
}

// Hop represents an intermediate or final node of the route. This naming
// is in line with the definition given in BOLT #4: Onion Routing Protocol.
// The struct houses the channel along which this hop can be reached and
// the values necessary to create the HTLC that needs to be sent to the
// next hop. It is also used to encode the per-hop payload included within
// the Sphinx packet.
type Hop struct {
// PubKeyBytes is the raw bytes of the public key of the target node.
PubKeyBytes Vertex

// ChannelID is the unique channel ID for the channel. The first 3
// bytes are the block height, the next 3 the index within the block,
// and the last 2 bytes are the output index for the channel.
ChannelID uint64

// OutgoingTimeLock is the timelock value that should be used when
// crafting the _outgoing_ HTLC from this hop.
OutgoingTimeLock uint32

// AmtToForward is the amount that this hop will forward to the next
// hop. This value is less than the value that the incoming HTLC
// carries as a fee will be subtracted by the hop.
AmtToForward lnwire.MilliSatoshi
}

// Route represents a path through the channel graph which runs over one or
// more channels in succession. This struct carries all the information
// required to craft the Sphinx onion packet, and send the payment along the
// first hop in the path. A route is only selected as valid if all the channels
// have sufficient capacity to carry the initial payment amount after fees are
// accounted for.
type Route struct {
// TotalTimeLock is the cumulative (final) time lock across the entire
// route. This is the CLTV value that should be extended to the first
// hop in the route. All other hops will decrement the time-lock as
// advertised, leaving enough time for all hops to wait for or present
// the payment preimage to complete the payment.
TotalTimeLock uint32

// TotalFees is the sum of the fees paid at each hop within the final
// route. In the case of a one-hop payment, this value will be zero as
// we don't need to pay a fee to ourself.
TotalFees lnwire.MilliSatoshi

// TotalAmount is the total amount of funds required to complete a
// payment over this route. This value includes the cumulative fees at
// each hop. As a result, the HTLC extended to the first-hop in the
// route will need to have at least this many satoshis, otherwise the
// route will fail at an intermediate node due to an insufficient
// amount of fees.
TotalAmount lnwire.MilliSatoshi

// SourcePubKey is the pubkey of the node where this route originates
// from.
SourcePubKey Vertex

// Hops contains details concerning the specific forwarding details at
// each hop.
Hops []*Hop
}

// HopFee returns the fee charged by the route hop indicated by hopIndex.
func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi {
var incomingAmt lnwire.MilliSatoshi
if hopIndex == 0 {
incomingAmt = r.TotalAmount
} else {
incomingAmt = r.Hops[hopIndex-1].AmtToForward
}

// Fee is calculated as difference between incoming and outgoing amount.
return incomingAmt - r.Hops[hopIndex].AmtToForward
}

// ToHopPayloads converts a complete route into the series of per-hop payloads
// that is to be encoded within each HTLC using an opaque Sphinx packet.
func (r *Route) ToHopPayloads() []sphinx.HopData {
hopPayloads := make([]sphinx.HopData, len(r.Hops))

// For each hop encoded within the route, we'll convert the hop struct
// to the matching per-hop payload struct as used by the sphinx
// package.
for i, hop := range r.Hops {
hopPayloads[i] = sphinx.HopData{
// TODO(roasbeef): properly set realm, make sphinx type
// an enum actually?
Realm: 0,
ForwardAmount: uint64(hop.AmtToForward),
OutgoingCltv: hop.OutgoingTimeLock,
}

// As a base case, the next hop is set to all zeroes in order
// to indicate that the "last hop" has no further hops after it.
nextHop := uint64(0)

// If we aren't on the last hop, then we set the "next address"
// field to be the channel that directly follows it.
if i != len(r.Hops)-1 {
nextHop = r.Hops[i+1].ChannelID
}

binary.BigEndian.PutUint64(hopPayloads[i].NextAddress[:],
nextHop)
}

return hopPayloads
}

// NewRouteFromHops creates a new Route structure from the minimally required
// information to perform the payment. It infers fee amounts and populates the
// node, chan and prev/next hop maps.
func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32,
sourceVertex Vertex, hops []*Hop) (*Route, error) {

if len(hops) == 0 {
return nil, ErrNoRouteHopsProvided
}

// First, we'll create a route struct and populate it with the fields
// for which the values are provided as arguments of this function.
// TotalFees is determined based on the difference between the amount
// that is sent from the source and the final amount that is received
// by the destination.
route := &Route{
SourcePubKey: sourceVertex,
Hops: hops,
TotalTimeLock: timeLock,
TotalAmount: amtToSend,
TotalFees: amtToSend - hops[len(hops)-1].AmtToForward,
}

return route, nil
}
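For orientation, here is a minimal, self-contained sketch (not part of this commit) showing how a caller might exercise the new routing/route API listed above. The keys, channel IDs and amounts are made up for illustration only:

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/btcec"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

func main() {
	// Generate three throwaway keys standing in for the sender and two hops.
	selfPriv, _ := btcec.NewPrivateKey(btcec.S256())
	hop1Priv, _ := btcec.NewPrivateKey(btcec.S256())
	hop2Priv, _ := btcec.NewPrivateKey(btcec.S256())

	// Each hop forwards slightly less than it receives; the difference is
	// that hop's fee. Amounts are illustrative millisatoshi values.
	hops := []*route.Hop{
		{
			PubKeyBytes:      route.NewVertex(hop1Priv.PubKey()),
			ChannelID:        1,
			OutgoingTimeLock: 144,
			AmtToForward:     10020,
		},
		{
			PubKeyBytes:      route.NewVertex(hop2Priv.PubKey()),
			ChannelID:        2,
			OutgoingTimeLock: 144,
			AmtToForward:     10000,
		},
	}

	// TotalFees is inferred as amtToSend minus the final hop's AmtToForward.
	rt, err := route.NewRouteFromHops(
		lnwire.MilliSatoshi(10040), 288,
		route.NewVertex(selfPriv.PubKey()), hops,
	)
	if err != nil {
		// ErrNoRouteHopsProvided is returned when hops is empty.
		fmt.Println("building route failed:", err)
		return
	}

	// HopFee(1) is the first hop's forward amount minus the second's.
	fmt.Println("total fees:", rt.TotalFees, "hop 1 fee:", rt.HopFee(1))

	// ToHopPayloads produces one sphinx.HopData per hop for the onion.
	fmt.Println("payload count:", len(rt.ToHopPayloads()))
}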
@ -25,6 +25,7 @@ import (
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/multimutex"
"github.com/lightningnetwork/lnd/routing/chainview"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/zpay32"
)

@ -77,11 +78,11 @@ type ChannelGraphSource interface {
// for the target node with a more recent timestamp. This method will
// also return true if we don't have an active channel announcement for
// the target node.
IsStaleNode(node Vertex, timestamp time.Time) bool
IsStaleNode(node route.Vertex, timestamp time.Time) bool

// IsPublicNode determines whether the given vertex is seen as a public
// node in the graph from the graph's source node's point of view.
IsPublicNode(node Vertex) (bool, error)
IsPublicNode(node route.Vertex) (bool, error)

// IsKnownEdge returns true if the graph source already knows of the
// passed channel ID either as a live or zombie edge.
@ -114,7 +115,7 @@ type ChannelGraphSource interface {
// FetchLightningNode attempts to look up a target node by its identity
// public key. channeldb.ErrGraphNodeNotFound is returned if the node
// doesn't exist within the graph.
FetchLightningNode(Vertex) (*channeldb.LightningNode, error)
FetchLightningNode(route.Vertex) (*channeldb.LightningNode, error)

// ForEachNode is used to iterate over every node in the known graph.
ForEachNode(func(node *channeldb.LightningNode) error) error
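After this hunk, callers of ChannelGraphSource identify nodes by route.Vertex rather than the former routing.Vertex. A hypothetical helper, not part of this commit, shows the updated signatures in use; describeNode and its log output are invented for illustration:

package routingexample

import (
	"log"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/lightningnetwork/lnd/routing/route"
)

// describeNode converts a public key to a route.Vertex once and reuses it
// across the ChannelGraphSource methods touched by this change.
func describeNode(src routing.ChannelGraphSource, pub *btcec.PublicKey) error {
	node := route.NewVertex(pub)

	// IsStaleNode also returns true when the node is simply unknown.
	if src.IsStaleNode(node, time.Now()) {
		log.Printf("node %v: announcement stale or unknown", node)
	}

	public, err := src.IsPublicNode(node)
	if err != nil {
		return err
	}

	dbNode, err := src.FetchLightningNode(node)
	if err != nil {
		return err
	}
	log.Printf("node %v (alias %q) public: %v", node, dbNode.Alias, public)

	return nil
}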
@ -236,7 +237,7 @@ type EdgeLocator struct {

// newEdgeLocatorByPubkeys returns an edgeLocator based on its end point
// pubkeys.
func newEdgeLocatorByPubkeys(channelID uint64, fromNode, toNode *Vertex) *EdgeLocator {
func newEdgeLocatorByPubkeys(channelID uint64, fromNode, toNode *route.Vertex) *EdgeLocator {
// Determine direction based on lexicographical ordering of both
// pubkeys.
var direction uint8
@ -973,7 +974,7 @@ func (r *ChannelRouter) networkHandler() {
// timestamp. ErrIgnored will be returned if we already have the node, and
// ErrOutdated will be returned if we have a timestamp that's after the new
// timestamp.
func (r *ChannelRouter) assertNodeAnnFreshness(node Vertex,
func (r *ChannelRouter) assertNodeAnnFreshness(node route.Vertex,
msgTimestamp time.Time) error {

// If we are not already aware of this node, it means that we don't
@ -1307,11 +1308,11 @@ type routingMsg struct {
// fee information attached. The set of routes returned may be less than the
// initial set of paths as it's possible we drop a route if it can't handle the
// total payment flow after fees are calculated.
func pathsToFeeSortedRoutes(source Vertex, paths [][]*channeldb.ChannelEdgePolicy,
func pathsToFeeSortedRoutes(source route.Vertex, paths [][]*channeldb.ChannelEdgePolicy,
finalCLTVDelta uint16, amt lnwire.MilliSatoshi,
currentHeight uint32) ([]*Route, error) {
currentHeight uint32) ([]*route.Route, error) {

validRoutes := make([]*Route, 0, len(paths))
validRoutes := make([]*route.Route, 0, len(paths))
for _, path := range paths {
// Attempt to make the path into a route. We snip off the first
// hop in the path as it contains a "self-hop" that is inserted
@ -1365,9 +1366,9 @@ func pathsToFeeSortedRoutes(source Vertex, paths [][]*channeldb.ChannelEdgePolic
// the required fee and time lock values running backwards along the route. The
// route that will be ranked the highest is the one with the lowest cumulative
// fee along the route.
func (r *ChannelRouter) FindRoutes(source, target Vertex,
func (r *ChannelRouter) FindRoutes(source, target route.Vertex,
amt lnwire.MilliSatoshi, restrictions *RestrictParams, numPaths uint32,
finalExpiry ...uint16) ([]*Route, error) {
finalExpiry ...uint16) ([]*route.Route, error) {

var finalCLTVDelta uint16
if len(finalExpiry) == 0 {
@ -1434,7 +1435,7 @@ func (r *ChannelRouter) FindRoutes(source, target Vertex,
// each path. During this process, some paths may be discarded if they
// aren't able to support the total satoshis flow once fees have been
// factored in.
sourceVertex := Vertex(r.selfNode.PubKeyBytes)
sourceVertex := route.Vertex(r.selfNode.PubKeyBytes)
validRoutes, err := pathsToFeeSortedRoutes(
sourceVertex, shortestPaths, finalCLTVDelta, amt,
uint32(currentHeight),
@ -1456,20 +1457,20 @@ func (r *ChannelRouter) FindRoutes(source, target Vertex,
// the onion route specified by the passed layer 3 route. The blob returned
// from this function can immediately be included within an HTLC add packet to
// be sent to the first hop within the route.
func generateSphinxPacket(route *Route, paymentHash []byte) ([]byte,
func generateSphinxPacket(rt *route.Route, paymentHash []byte) ([]byte,
*sphinx.Circuit, error) {

// As a sanity check, we'll ensure that the set of hops has been
// properly filled in, otherwise, we won't actually be able to
// construct a route.
if len(route.Hops) == 0 {
return nil, nil, ErrNoRouteHopsProvided
if len(rt.Hops) == 0 {
return nil, nil, route.ErrNoRouteHopsProvided
}

// First obtain all the public keys along the route which are contained
// in each hop.
nodes := make([]*btcec.PublicKey, len(route.Hops))
for i, hop := range route.Hops {
nodes := make([]*btcec.PublicKey, len(rt.Hops))
for i, hop := range rt.Hops {
pub, err := btcec.ParsePubKey(hop.PubKeyBytes[:],
btcec.S256())
if err != nil {
@ -1482,7 +1483,7 @@ func generateSphinxPacket(route *Route, paymentHash []byte) ([]byte,
// Next we generate the per-hop payload which gives each node within
// the route the necessary information (fees, CLTV value, etc) to
// properly forward the payment.
hopPayloads := route.ToHopPayloads()
hopPayloads := rt.ToHopPayloads()

log.Tracef("Constructed per-hop payloads for payment_hash=%x: %v",
paymentHash[:], newLogClosure(func() string {
@ -1530,7 +1531,7 @@ func generateSphinxPacket(route *Route, paymentHash []byte) ([]byte,
// final destination.
type LightningPayment struct {
// Target is the node in which the payment should be routed towards.
Target Vertex
Target route.Vertex

// Amount is the value of the payment to send through the network in
// milli-satoshis.
@ -1586,7 +1587,7 @@ type LightningPayment struct {
// will be returned which describes the path the successful payment traversed
// within the network to reach the destination. Additionally, the payment
// preimage will also be returned.
func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route, error) {
func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *route.Route, error) {
// Before starting the HTLC routing attempt, we'll create a fresh
// payment session which will report our errors back to mission
// control.
@ -1607,8 +1608,8 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
// succeeds, then a non-nil Route will be returned which describes the
// path the successful payment traversed within the network to reach the
// destination. Additionally, the payment preimage will also be returned.
func (r *ChannelRouter) SendToRoute(routes []*Route,
payment *LightningPayment) ([32]byte, *Route, error) {
func (r *ChannelRouter) SendToRoute(routes []*route.Route,
payment *LightningPayment) ([32]byte, *route.Route, error) {

paySession := r.missionControl.NewPaymentSessionFromRoutes(
routes,
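A short sketch, not taken from this commit, of how a caller hands pre-built routes to the router after the SendToRoute signature change. The helper name, the PaymentHash field value and the amounts are assumptions for illustration; only the SendToRoute signature and the Target/Amount fields are shown in the diff above:

package routingexample

import (
	"log"

	"github.com/lightningnetwork/lnd/routing"
	"github.com/lightningnetwork/lnd/routing/route"
)

// paySketch dispatches one pre-computed route via the router. The payment
// hash is assumed to come from an invoice elsewhere.
func paySketch(r *routing.ChannelRouter, rt *route.Route,
	target route.Vertex, payHash [32]byte) error {

	payment := &routing.LightningPayment{
		Target:      target,
		Amount:      rt.TotalAmount - rt.TotalFees,
		PaymentHash: payHash,
	}

	// SendToRoute now takes []*route.Route instead of []*routing.Route.
	preimage, usedRoute, err := r.SendToRoute([]*route.Route{rt}, payment)
	if err != nil {
		return err
	}

	log.Printf("paid via %d hops, preimage=%x", len(usedRoute.Hops), preimage)
	return nil
}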
@ -1625,7 +1626,7 @@ func (r *ChannelRouter) SendToRoute(routes []*Route,
// within the network to reach the destination. Additionally, the payment
// preimage will also be returned.
func (r *ChannelRouter) sendPayment(payment *LightningPayment,
paySession *paymentSession) ([32]byte, *Route, error) {
paySession *paymentSession) ([32]byte, *route.Route, error) {

log.Tracef("Dispatching route for lightning payment: %v",
newLogClosure(func() string {
@ -1719,7 +1720,7 @@ func (r *ChannelRouter) sendPayment(payment *LightningPayment,
// bool parameter indicates whether this is a final outcome or more attempts
// should be made.
func (r *ChannelRouter) sendPaymentAttempt(paySession *paymentSession,
route *Route, paymentHash [32]byte) ([32]byte, bool, error) {
route *route.Route, paymentHash [32]byte) ([32]byte, bool, error) {

log.Tracef("Attempting to send payment %x, using route: %v",
paymentHash, newLogClosure(func() string {
@ -1742,7 +1743,7 @@ func (r *ChannelRouter) sendPaymentAttempt(paySession *paymentSession,

// sendToSwitch sends a payment along the specified route and returns the
// obtained preimage.
func (r *ChannelRouter) sendToSwitch(route *Route, paymentHash [32]byte) (
func (r *ChannelRouter) sendToSwitch(route *route.Route, paymentHash [32]byte) (
[32]byte, error) {

// Generate the raw encoded sphinx packet to be included along
@ -1782,7 +1783,7 @@ func (r *ChannelRouter) sendToSwitch(route *Route, paymentHash [32]byte) (
// to continue with an alternative route. This is indicated by the boolean
// return value.
func (r *ChannelRouter) processSendError(paySession *paymentSession,
route *Route, err error) bool {
rt *route.Route, err error) bool {

fErr, ok := err.(*htlcswitch.ForwardingError)
if !ok {
@ -1790,13 +1791,13 @@ func (r *ChannelRouter) processSendError(paySession *paymentSession,
}

errSource := fErr.ErrorSource
errVertex := NewVertex(errSource)
errVertex := route.NewVertex(errSource)

log.Tracef("node=%x reported failure when sending htlc", errVertex)

// Always determine chan id ourselves, because a channel
// update with id may not be available.
failedEdge, err := getFailedEdge(route, errVertex)
failedEdge, err := getFailedEdge(rt, route.Vertex(errVertex))
if err != nil {
return true
}
@ -1833,7 +1834,7 @@ func (r *ChannelRouter) processSendError(paySession *paymentSession,
}

paySession.ReportEdgePolicyFailure(
NewVertex(errSource), failedEdge,
route.NewVertex(errSource), failedEdge,
)
}

@ -2007,7 +2008,7 @@ func (r *ChannelRouter) processSendError(paySession *paymentSession,
// getFailedEdge tries to locate the failing channel given a route and the
// pubkey of the node that sent the error. It will assume that the error is
// associated with the outgoing channel of the error node.
func getFailedEdge(route *Route, errSource Vertex) (
func getFailedEdge(route *route.Route, errSource route.Vertex) (
*EdgeLocator, error) {

hopCount := len(route.Hops)
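One detail worth calling out in the hunks above: inside the routing package the new import is named route, so a parameter still called route shadows the package for the rest of that function body. Functions whose bodies must keep referencing the package (processSendError needs route.NewVertex, generateSphinxPacket needs route.ErrNoRouteHopsProvided) therefore rename the parameter to rt, while functions that never touch the package again (getFailedEdge, sendToSwitch, sendPaymentAttempt) keep the old name. A standalone sketch of the rule, not taken from the commit:

package routing

import "github.com/lightningnetwork/lnd/routing/route"

// Fine: the parameter shadows the package, but the body only uses the
// parameter itself, never the package.
func hopCount(route *route.Route) int {
	return len(route.Hops)
}

// Here the body needs the package again, so the parameter must be renamed.
func firstHopVertex(rt *route.Route) route.Vertex {
	if len(rt.Hops) == 0 {
		return route.Vertex{}
	}
	return rt.Hops[0].PubKeyBytes
}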
@ -2179,7 +2180,7 @@ func (r *ChannelRouter) GetChannelByID(chanID lnwire.ShortChannelID) (
// within the graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) FetchLightningNode(node Vertex) (*channeldb.LightningNode, error) {
func (r *ChannelRouter) FetchLightningNode(node route.Vertex) (*channeldb.LightningNode, error) {
pubKey, err := btcec.ParsePubKey(node[:], btcec.S256())
if err != nil {
return nil, fmt.Errorf("unable to parse raw public key: %v", err)
@ -2244,7 +2245,7 @@ func (r *ChannelRouter) AddProof(chanID lnwire.ShortChannelID,
// target node with a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsStaleNode(node Vertex, timestamp time.Time) bool {
func (r *ChannelRouter) IsStaleNode(node route.Vertex, timestamp time.Time) bool {
// If our attempt to assert that the node announcement is fresh fails,
// then we know that this is actually a stale announcement.
return r.assertNodeAnnFreshness(node, timestamp) != nil
@ -2254,7 +2255,7 @@ func (r *ChannelRouter) IsStaleNode(node Vertex, timestamp time.Time) bool {
// the graph from the graph's source node's point of view.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsPublicNode(node Vertex) (bool, error) {
func (r *ChannelRouter) IsPublicNode(node route.Vertex) (bool, error) {
return r.cfg.Graph.IsPublicNode(node)
}
@ -19,6 +19,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/zpay32"
)

@ -31,7 +32,7 @@ type testCtx struct {

graph *channeldb.ChannelGraph

aliases map[string]Vertex
aliases map[string]route.Vertex

chain *mockChain

@ -415,7 +416,7 @@ func TestChannelUpdateValidation(t *testing.T) {

hop2 := ctx.aliases["c"]

hops := []*Hop{
hops := []*route.Hop{
{
ChannelID: 1,
PubKeyBytes: hop1,
@ -426,7 +427,7 @@ func TestChannelUpdateValidation(t *testing.T) {
},
}

route, err := NewRouteFromHops(
rt, err := route.NewRouteFromHops(
lnwire.MilliSatoshi(10000), 100,
ctx.aliases["a"], hops,
)
@ -473,7 +474,7 @@ func TestChannelUpdateValidation(t *testing.T) {
// Send off the payment request to the router. The specified route
// should be attempted and the channel update should be received by
// router and ignored because it is missing a valid signature.
_, _, err = ctx.router.SendToRoute([]*Route{route}, payment)
_, _, err = ctx.router.SendToRoute([]*route.Route{rt}, payment)
if err == nil {
t.Fatalf("expected route to fail with channel update")
}
@ -506,7 +507,7 @@ func TestChannelUpdateValidation(t *testing.T) {
}

// Retry the payment using the same route as before.
_, _, err = ctx.router.SendToRoute([]*Route{route}, payment)
_, _, err = ctx.router.SendToRoute([]*route.Route{rt}, payment)
if err == nil {
t.Fatalf("expected route to fail with channel update")
}
@ -718,7 +719,7 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) {
// assertExpectedPath is a helper function that asserts the returned
// route properly routes around the failure we've introduced in the
// graph.
assertExpectedPath := func(retPreImage [32]byte, route *Route) {
assertExpectedPath := func(retPreImage [32]byte, route *route.Route) {
// The route selected should have two hops
if len(route.Hops) != 2 {
t.Fatalf("incorrect route length: expected %v got %v", 2,
@ -744,12 +745,12 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) {
// Send off the payment request to the router, this payment should
// succeed as we should actually go through Pham Nuwen in order to get
// to Sophon, even though he has higher fees.
paymentPreImage, route, err := ctx.router.SendPayment(&payment)
paymentPreImage, rt, err := ctx.router.SendPayment(&payment)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}

assertExpectedPath(paymentPreImage, route)
assertExpectedPath(paymentPreImage, rt)

// We'll now modify the error return an IncorrectCltvExpiry error
// instead, this should result in the same behavior of roasbeef routing
@ -778,12 +779,12 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) {

// Once again, Roasbeef should route around Goku since they disagree
// w.r.t to the block height, and instead go through Pham Nuwen.
paymentPreImage, route, err = ctx.router.SendPayment(&payment)
paymentPreImage, rt, err = ctx.router.SendPayment(&payment)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}

assertExpectedPath(paymentPreImage, route)
assertExpectedPath(paymentPreImage, rt)
}

// TestSendPaymentErrorPathPruning tests that the send of candidate routes
@ -902,25 +903,25 @@ func TestSendPaymentErrorPathPruning(t *testing.T) {
// This shouldn't return an error, as we'll make a payment attempt via
// the satoshi channel based on the assumption that there might be an
// intermittent issue with the roasbeef <-> lioji channel.
paymentPreImage, route, err := ctx.router.SendPayment(&payment)
paymentPreImage, rt, err := ctx.router.SendPayment(&payment)
if err != nil {
t.Fatalf("unable send payment: %v", err)
}

// This path should go: roasbeef -> satoshi -> luoji
if len(route.Hops) != 2 {
if len(rt.Hops) != 2 {
t.Fatalf("incorrect route length: expected %v got %v", 2,
len(route.Hops))
len(rt.Hops))
}
if !bytes.Equal(paymentPreImage[:], preImage[:]) {
t.Fatalf("incorrect preimage used: expected %x got %x",
preImage[:], paymentPreImage[:])
}
if route.Hops[0].PubKeyBytes != ctx.aliases["satoshi"] {
if rt.Hops[0].PubKeyBytes != ctx.aliases["satoshi"] {

t.Fatalf("route should go through satoshi as first hop, "+
"instead passes through: %v",
getAliasFromPubKey(route.Hops[0].PubKeyBytes,
getAliasFromPubKey(rt.Hops[0].PubKeyBytes,
ctx.aliases))
}

@ -944,16 +945,16 @@ func TestSendPaymentErrorPathPruning(t *testing.T) {
return preImage, nil
}

paymentPreImage, route, err = ctx.router.SendPayment(&payment)
paymentPreImage, rt, err = ctx.router.SendPayment(&payment)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}

// This should succeed finally. The route selected should have two
// hops.
if len(route.Hops) != 2 {
if len(rt.Hops) != 2 {
t.Fatalf("incorrect route length: expected %v got %v", 2,
len(route.Hops))
len(rt.Hops))
}

// The preimage should match up with the once created above.
@ -963,11 +964,11 @@ func TestSendPaymentErrorPathPruning(t *testing.T) {
}

// The route should have satoshi as the first hop.
if route.Hops[0].PubKeyBytes != ctx.aliases["satoshi"] {
if rt.Hops[0].PubKeyBytes != ctx.aliases["satoshi"] {

t.Fatalf("route should go through satoshi as first hop, "+
"instead passes through: %v",
getAliasFromPubKey(route.Hops[0].PubKeyBytes,
getAliasFromPubKey(rt.Hops[0].PubKeyBytes,
ctx.aliases))
}
}
@ -1341,7 +1342,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) {
// We should now be able to find two routes to node 2.
paymentAmt := lnwire.NewMSatFromSatoshis(100)
targetNode := priv2.PubKey()
var targetPubKeyBytes Vertex
var targetPubKeyBytes route.Vertex
copy(targetPubKeyBytes[:], targetNode.SerializeCompressed())
routes, err := ctx.router.FindRoutes(
ctx.router.selfNode.PubKeyBytes,
@ -2512,9 +2513,9 @@ func TestIsStaleEdgePolicy(t *testing.T) {
func TestEmptyRoutesGenerateSphinxPacket(t *testing.T) {
t.Parallel()

emptyRoute := &Route{}
emptyRoute := &route.Route{}
_, _, err := generateSphinxPacket(emptyRoute, testHash[:])
if err != ErrNoRouteHopsProvided {
if err != route.ErrNoRouteHopsProvided {
t.Fatalf("expected empty hops error: instead got: %v", err)
}
}
@ -6,6 +6,7 @@ import (

"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)

// ErrVBarrierShuttingDown signals that the barrier has been requested to
@ -42,7 +43,7 @@ type ValidationBarrier struct {
// nodeAnnDependencies tracks any pending NodeAnnouncement validation
// jobs which should wait until the completion of the
// ChannelAnnouncement before proceeding.
nodeAnnDependencies map[Vertex]chan struct{}
nodeAnnDependencies map[route.Vertex]chan struct{}

quit chan struct{}
sync.Mutex
@ -57,7 +58,7 @@ func NewValidationBarrier(numActiveReqs int,
v := &ValidationBarrier{
chanAnnFinSignal: make(map[lnwire.ShortChannelID]chan struct{}),
chanEdgeDependencies: make(map[lnwire.ShortChannelID]chan struct{}),
nodeAnnDependencies: make(map[Vertex]chan struct{}),
nodeAnnDependencies: make(map[route.Vertex]chan struct{}),
quit: quitChan,
}

@ -110,8 +111,8 @@ func (v *ValidationBarrier) InitJobDependencies(job interface{}) {
v.chanAnnFinSignal[msg.ShortChannelID] = annFinCond
v.chanEdgeDependencies[msg.ShortChannelID] = annFinCond

v.nodeAnnDependencies[Vertex(msg.NodeID1)] = annFinCond
v.nodeAnnDependencies[Vertex(msg.NodeID2)] = annFinCond
v.nodeAnnDependencies[route.Vertex(msg.NodeID1)] = annFinCond
v.nodeAnnDependencies[route.Vertex(msg.NodeID2)] = annFinCond
}
case *channeldb.ChannelEdgeInfo:

@ -122,8 +123,8 @@ func (v *ValidationBarrier) InitJobDependencies(job interface{}) {
v.chanAnnFinSignal[shortID] = annFinCond
v.chanEdgeDependencies[shortID] = annFinCond

v.nodeAnnDependencies[Vertex(msg.NodeKey1Bytes)] = annFinCond
v.nodeAnnDependencies[Vertex(msg.NodeKey2Bytes)] = annFinCond
v.nodeAnnDependencies[route.Vertex(msg.NodeKey1Bytes)] = annFinCond
v.nodeAnnDependencies[route.Vertex(msg.NodeKey2Bytes)] = annFinCond
}

// These other types don't have any dependants, so no further
@ -174,12 +175,12 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) error {
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
signal, ok = v.chanEdgeDependencies[shortID]
case *channeldb.LightningNode:
vertex := Vertex(msg.PubKeyBytes)
vertex := route.Vertex(msg.PubKeyBytes)
signal, ok = v.nodeAnnDependencies[vertex]
case *lnwire.ChannelUpdate:
signal, ok = v.chanEdgeDependencies[msg.ShortChannelID]
case *lnwire.NodeAnnouncement:
vertex := Vertex(msg.NodeID)
vertex := route.Vertex(msg.NodeID)
signal, ok = v.nodeAnnDependencies[vertex]

// Other types of jobs can be executed immediately, so we'll just
@ -243,9 +244,9 @@ func (v *ValidationBarrier) SignalDependants(job interface{}) {
// map, as if we reach this point, then all dependants have already
// finished executing and we can proceed.
case *channeldb.LightningNode:
delete(v.nodeAnnDependencies, Vertex(msg.PubKeyBytes))
delete(v.nodeAnnDependencies, route.Vertex(msg.PubKeyBytes))
case *lnwire.NodeAnnouncement:
delete(v.nodeAnnDependencies, Vertex(msg.NodeID))
delete(v.nodeAnnDependencies, route.Vertex(msg.NodeID))
case *lnwire.ChannelUpdate:
delete(v.chanEdgeDependencies, msg.ShortChannelID)
case *channeldb.ChannelEdgePolicy:
37
rpcserver.go
@ -17,6 +17,7 @@ import (
"time"

"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/routing/route"

"github.com/btcsuite/btcd/blockchain"
"github.com/btcsuite/btcd/btcec"
@ -2720,7 +2721,7 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription,

// savePayment saves a successfully completed payment to the database for
// historical record keeping.
func (r *rpcServer) savePayment(route *routing.Route,
func (r *rpcServer) savePayment(route *route.Route,
amount lnwire.MilliSatoshi, preImage []byte) error {

paymentPath := make([][33]byte, len(route.Hops))
@ -2770,7 +2771,7 @@ type paymentStream struct {
// lnrpc.SendToRouteRequest can be passed to sendPayment.
type rpcPaymentRequest struct {
*lnrpc.SendRequest
routes []*routing.Route
routes []*route.Route
}

// calculateFeeLimit returns the fee limit in millisatoshis. If a percentage
@ -2860,9 +2861,9 @@ func unmarshallSendToRouteRequest(req *lnrpc.SendToRouteRequest,
return nil, fmt.Errorf("cannot use both route and routes field")
}

var routes []*routing.Route
var routes []*route.Route
if len(req.Routes) > 0 {
routes = make([]*routing.Route, len(req.Routes))
routes = make([]*route.Route, len(req.Routes))
for i, rpcroute := range req.Routes {
route, err := unmarshallRoute(rpcroute, graph)
if err != nil {
@ -2871,11 +2872,11 @@ func unmarshallSendToRouteRequest(req *lnrpc.SendToRouteRequest,
routes[i] = route
}
} else {
route, err := unmarshallRoute(req.Route, graph)
rt, err := unmarshallRoute(req.Route, graph)
if err != nil {
return nil, err
}
routes = []*routing.Route{route}
routes = []*route.Route{rt}
}

return &rpcPaymentRequest{
@ -2896,13 +2897,13 @@ type rpcPaymentIntent struct {
msat lnwire.MilliSatoshi
feeLimit lnwire.MilliSatoshi
cltvLimit *uint32
dest routing.Vertex
dest route.Vertex
rHash [32]byte
cltvDelta uint16
routeHints [][]zpay32.HopHint
outgoingChannelID *uint64

routes []*routing.Route
routes []*route.Route
}

// extractPaymentIntent attempts to parse the complete details required to
@ -3064,7 +3065,7 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error
}

type paymentIntentResponse struct {
Route *routing.Route
Route *route.Route
Preimage [32]byte
Err error
}
@ -3082,7 +3083,7 @@ func (r *rpcServer) dispatchPaymentIntent(
// we'll get a non-nil error.
var (
preImage [32]byte
route *routing.Route
route *route.Route
routerErr error
)
@ -3887,7 +3888,7 @@ func (r *rpcServer) QueryRoutes(ctx context.Context,
// retrieve both endpoints and determine the hop pubkey using the previous hop
// pubkey. If the channel is unknown, an error is returned.
func unmarshallHopByChannelLookup(graph *channeldb.ChannelGraph, hop *lnrpc.Hop,
prevPubKeyBytes [33]byte) (*routing.Hop, error) {
prevPubKeyBytes [33]byte) (*route.Hop, error) {

// Discard edge policies, because they may be nil.
edgeInfo, _, _, err := graph.FetchChannelEdgesByID(hop.ChanId)
@ -3906,7 +3907,7 @@ func unmarshallHopByChannelLookup(graph *channeldb.ChannelGraph, hop *lnrpc.Hop,
return nil, fmt.Errorf("channel edge does not match expected node")
}

return &routing.Hop{
return &route.Hop{
OutgoingTimeLock: hop.Expiry,
AmtToForward: lnwire.MilliSatoshi(hop.AmtToForwardMsat),
PubKeyBytes: pubKeyBytes,
@ -3917,7 +3918,7 @@ func unmarshallHopByChannelLookup(graph *channeldb.ChannelGraph, hop *lnrpc.Hop,
// unmarshallKnownPubkeyHop unmarshalls an rpc hop that contains the hop pubkey.
// The channel graph doesn't need to be queried because all information required
// for sending the payment is present.
func unmarshallKnownPubkeyHop(hop *lnrpc.Hop) (*routing.Hop, error) {
func unmarshallKnownPubkeyHop(hop *lnrpc.Hop) (*route.Hop, error) {
pubKey, err := hex.DecodeString(hop.PubKey)
if err != nil {
return nil, fmt.Errorf("cannot decode pubkey %s", hop.PubKey)
@ -3926,7 +3927,7 @@ func unmarshallKnownPubkeyHop(hop *lnrpc.Hop) (*routing.Hop, error) {
var pubKeyBytes [33]byte
copy(pubKeyBytes[:], pubKey)

return &routing.Hop{
return &route.Hop{
OutgoingTimeLock: hop.Expiry,
AmtToForward: lnwire.MilliSatoshi(hop.AmtToForwardMsat),
PubKeyBytes: pubKeyBytes,
@ -3937,7 +3938,7 @@ func unmarshallKnownPubkeyHop(hop *lnrpc.Hop) (*routing.Hop, error) {
// unmarshallHop unmarshalls an rpc hop that may or may not contain a node
// pubkey.
func unmarshallHop(graph *channeldb.ChannelGraph, hop *lnrpc.Hop,
prevNodePubKey [33]byte) (*routing.Hop, error) {
prevNodePubKey [33]byte) (*route.Hop, error) {

if hop.PubKey == "" {
// If no pub key is given of the hop, the local channel
@ -3952,7 +3953,7 @@ func unmarshallHop(graph *channeldb.ChannelGraph, hop *lnrpc.Hop,
// unmarshallRoute unmarshalls an rpc route. For hops that don't specify a
// pubkey, the channel graph is queried.
func unmarshallRoute(rpcroute *lnrpc.Route,
graph *channeldb.ChannelGraph) (*routing.Route, error) {
graph *channeldb.ChannelGraph) (*route.Route, error) {

sourceNode, err := graph.SourceNode()
if err != nil {
@ -3962,7 +3963,7 @@ func unmarshallRoute(rpcroute *lnrpc.Route,

prevNodePubKey := sourceNode.PubKeyBytes

hops := make([]*routing.Hop, len(rpcroute.Hops))
hops := make([]*route.Hop, len(rpcroute.Hops))
for i, hop := range rpcroute.Hops {
routeHop, err := unmarshallHop(graph,
hop, prevNodePubKey)
@ -3975,7 +3976,7 @@ func unmarshallRoute(rpcroute *lnrpc.Route,
prevNodePubKey = routeHop.PubKeyBytes
}

route, err := routing.NewRouteFromHops(
route, err := route.NewRouteFromHops(
lnwire.MilliSatoshi(rpcroute.TotalAmtMsat),
rpcroute.TotalTimeLock,
sourceNode.PubKeyBytes,
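For context, a minimal sketch (not the code in this commit) of the mapping an RPC hop goes through on the rpcserver side after the change, mirroring unmarshallKnownPubkeyHop above: the hex pubkey becomes a 33-byte route.Vertex and the msat amount becomes an lnwire.MilliSatoshi. The function name hopFromRPC is hypothetical:

package rpcexample

import (
	"encoding/hex"
	"fmt"

	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)

// hopFromRPC is a hypothetical stand-in for unmarshallKnownPubkeyHop.
func hopFromRPC(hop *lnrpc.Hop) (*route.Hop, error) {
	pubKey, err := hex.DecodeString(hop.PubKey)
	if err != nil {
		return nil, fmt.Errorf("cannot decode pubkey %s", hop.PubKey)
	}

	// route.Vertex is a [33]byte array, so the decoded key is copied in.
	var pubKeyBytes route.Vertex
	copy(pubKeyBytes[:], pubKey)

	return &route.Hop{
		OutgoingTimeLock: hop.Expiry,
		AmtToForward:     lnwire.MilliSatoshi(hop.AmtToForwardMsat),
		PubKeyBytes:      pubKeyBytes,
		ChannelID:        hop.ChanId,
	}, nil
}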
@ -44,6 +44,7 @@ import (
"github.com/lightningnetwork/lnd/netann"
"github.com/lightningnetwork/lnd/pool"
"github.com/lightningnetwork/lnd/routing"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/sweep"
"github.com/lightningnetwork/lnd/ticker"
"github.com/lightningnetwork/lnd/tor"
@ -2063,7 +2064,7 @@ func (s *server) prunePersistentPeerConnection(compressedPubKey [33]byte) {
// the target peers.
//
// NOTE: This function is safe for concurrent access.
func (s *server) BroadcastMessage(skips map[routing.Vertex]struct{},
func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
msgs ...lnwire.Message) error {

|