autopilot: add greedy "TopK" centrality heuristic

This commit adds a new autopilot heuristic which simply returns the
normalized betweenness centrality values for the current graph. The new
heuristic makes it possible to prefer nodes with high centrality when
we're trying to open channels. The heuristic is also somewhat dumb, as
it doesn't try to figure out the truly best nodes: that would require
adding ghost edges to the graph and recalculating the centrality once
for every candidate node (minus the ones we already have channels
with).
Author: Andras Banki-Horvath
Date:   2020-06-17 18:55:28 +02:00
Parent: 82ddccee0b
Commit: 8373b2ad20
GPG Key ID: 80E5375C094198D8
2 changed files with 202 additions and 0 deletions
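
The greedy variant described in the commit message is not implemented here; the
following is a minimal, hypothetical sketch of it, assuming it would sit in the
autopilot package next to the code below (ChannelGraph, NodeID and
BetweennessCentrality come from that package). The ghostGraph interface with its
addGhostEdge/removeGhostEdge methods and the greedyBestCandidate function are
invented for illustration only:

// ghostGraph is a hypothetical ChannelGraph that additionally allows us to
// insert and remove edges that do not exist in the real graph.
type ghostGraph interface {
	ChannelGraph

	addGhostEdge(a, b NodeID)
	removeGhostEdge(a, b NodeID)
}

// greedyBestCandidate sketches the greedy alternative: for every candidate we
// temporarily add a ghost edge from our own node, recompute betweenness
// centrality for the whole graph and keep the candidate that raises our own
// centrality the most. Recomputing centrality once per candidate is what
// makes this variant so much slower than TopK.
func greedyBestCandidate(metric *BetweennessCentrality, graph ghostGraph,
	self NodeID, candidates []NodeID) (NodeID, error) {

	var (
		best      NodeID
		bestScore float64
	)
	for _, candidate := range candidates {
		// Pretend we already had a channel to this candidate.
		graph.addGhostEdge(self, candidate)

		// Recalculate centrality for the extended graph.
		if err := metric.Refresh(graph); err != nil {
			return best, err
		}
		if score := metric.GetMetric(true)[self]; score > bestScore {
			best, bestScore = candidate, score
		}

		// Undo the ghost edge before trying the next candidate.
		graph.removeGhostEdge(self, candidate)
	}

	return best, nil
}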

@@ -0,0 +1,93 @@
package autopilot

import (
	"runtime"

	"github.com/btcsuite/btcutil"
)

// TopCentrality is a simple greedy technique to create connections to nodes
// with the top betweenness centrality value. This algorithm is usually
// referred to as TopK in the literature. The idea is that by opening channels
// to nodes with top betweenness centrality we also increase our own
// betweenness centrality (given we already have at least one channel, or
// create at least two new channels).
//
// A different and much better approach is, instead of selecting the nodes
// with the top centrality values, to extend the graph in a loop by inserting
// a new, not yet existing edge and recalculating the betweenness centrality
// of each node. This technique is usually referred to as the "greedy"
// algorithm and gives better results than TopK, but is considerably slower
// too.
type TopCentrality struct {
	centralityMetric *BetweennessCentrality
}

// A compile time assertion to ensure TopCentrality meets the
// AttachmentHeuristic interface.
var _ AttachmentHeuristic = (*TopCentrality)(nil)

// NewTopCentrality constructs and returns a new TopCentrality heuristic.
func NewTopCentrality() *TopCentrality {
	metric, err := NewBetweennessCentralityMetric(
		runtime.NumCPU(),
	)
	if err != nil {
		panic(err)
	}

	return &TopCentrality{
		centralityMetric: metric,
	}
}

// Name returns the name of the heuristic.
func (g *TopCentrality) Name() string {
	return "top_centrality"
}

// NodeScores will return a [0,1] normalized map of scores for the given
// nodes, except for the ones we already have channels with. The scores are
// simply the betweenness centrality values of the nodes.
//
// As our current implementation of betweenness centrality is non-incremental,
// NodeScores will recalculate the centrality values on every call, which is
// slow for large graphs.
func (g *TopCentrality) NodeScores(graph ChannelGraph, chans []Channel,
	chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
	map[NodeID]*NodeScore, error) {

	// Calculate betweenness centrality for the whole graph.
	if err := g.centralityMetric.Refresh(graph); err != nil {
		return nil, err
	}

	normalize := true
	centrality := g.centralityMetric.GetMetric(normalize)

	// Create a map of the existing peers for faster filtering.
	existingPeers := make(map[NodeID]struct{})
	for _, c := range chans {
		existingPeers[c.Node] = struct{}{}
	}

	result := make(map[NodeID]*NodeScore, len(nodes))
	for nodeID := range nodes {
		// Skip nodes we already have a channel with.
		if _, ok := existingPeers[nodeID]; ok {
			continue
		}

		// Skip passed nodes that are not in the graph. This can
		// happen if the graph changed before we computed the
		// centrality values, as the nodes we iterate over are
		// pre-filtered by the autopilot agent.
		score, ok := centrality[nodeID]
		if !ok {
			continue
		}

		result[nodeID] = &NodeScore{
			NodeID: nodeID,
			Score:  score,
		}
	}

	return result, nil
}
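
For context only (not part of this commit), a minimal usage sketch of the
heuristic above, assuming it lives in the same autopilot package so
ChannelGraph, Channel, NodeID and btcutil are in scope. The
exampleTopCentralityUsage name and the way its inputs are obtained are
invented; in lnd the autopilot agent supplies the graph, the open channels and
the candidate set:

// exampleTopCentralityUsage is a hypothetical caller of the heuristic above
// that returns the candidate with the highest centrality score.
func exampleTopCentralityUsage(graph ChannelGraph, chans []Channel,
	candidates map[NodeID]struct{}) (NodeID, error) {

	heuristic := NewTopCentrality()

	// Score the candidate nodes. The channel size is ignored by this
	// heuristic, so any sane value will do.
	scores, err := heuristic.NodeScores(
		graph, chans, btcutil.SatoshiPerBitcoin, candidates,
	)
	if err != nil {
		return NodeID{}, err
	}

	// Pick the candidate with the highest normalized betweenness
	// centrality, i.e. the node TopK prefers to open a channel to.
	var (
		best      NodeID
		bestScore float64
	)
	for nodeID, score := range scores {
		if score.Score > bestScore {
			best, bestScore = nodeID, score.Score
		}
	}

	return best, nil
}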

@@ -0,0 +1,109 @@
package autopilot

import (
	"testing"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcutil"
	"github.com/stretchr/testify/require"
)

// testTopCentrality is a subtest helper that, given the passed graph and
// channels, creates the expected centrality score set and checks that the
// calculated score set matches it.
func testTopCentrality(t *testing.T, graph testGraph,
	graphNodes map[int]*btcec.PublicKey, channelsWith []int) {

	topCentrality := NewTopCentrality()

	var channels []Channel
	for _, ch := range channelsWith {
		channels = append(channels, Channel{
			Node: NewNodeID(graphNodes[ch]),
		})
	}

	// Start iteration from -1 to also test the case where the node set
	// is empty.
	for i := -1; i < len(graphNodes); i++ {
		nodes := make(map[NodeID]struct{})
		expected := make(map[NodeID]*NodeScore)

		for j := 0; j <= i; j++ {
			// Add the node to the interest set.
			nodeID := NewNodeID(graphNodes[j])
			nodes[nodeID] = struct{}{}

			// Add it to the expected set unless it's a node we
			// have a channel with.
			haveChannel := false
			for _, ch := range channels {
				if nodeID == ch.Node {
					haveChannel = true
					break
				}
			}

			if !haveChannel {
				score := normalizedTestGraphCentrality[j]
				expected[nodeID] = &NodeScore{
					NodeID: nodeID,
					Score:  score,
				}
			}
		}

		const chanSize = btcutil.SatoshiPerBitcoin

		// Attempt to get centrality scores and expect that the
		// result equals the expected set.
		scores, err := topCentrality.NodeScores(
			graph, channels, chanSize, nodes,
		)
		require.NoError(t, err)
		require.Equal(t, expected, scores)
	}
}

// TestTopCentrality tests that we return the correct normalized centrality
// values given a non-empty graph, and given our node has an increasing number
// of channels from 0 to N-1, simulating the whole range from unconnected to
// fully connected.
func TestTopCentrality(t *testing.T) {
	// Generate channels: {}, {0}, {0, 1}, ..., {0, 1, ..., N-1}
	channelsWith := [][]int{nil}

	for i := 0; i < centralityTestGraph.nodes; i++ {
		channels := make([]int, i+1)
		for j := 0; j <= i; j++ {
			channels[j] = j
		}
		channelsWith = append(channelsWith, channels)
	}

	for _, chanGraph := range chanGraphs {
		chanGraph := chanGraph

		success := t.Run(chanGraph.name, func(t *testing.T) {
			t.Parallel()

			graph, cleanup, err := chanGraph.genFunc()
			require.NoError(t, err, "unable to create graph")
			if cleanup != nil {
				defer cleanup()
			}

			// Build the test graph.
			graphNodes := buildTestGraph(
				t, graph, centralityTestGraph,
			)

			for _, chans := range channelsWith {
				testTopCentrality(t, graph, graphNodes, chans)
			}
		})
		require.True(t, success)
	}
}