From 906b0b707b1c3ee2daaac302f765bab86baa32a8 Mon Sep 17 00:00:00 2001
From: Andras Banki-Horvath
Date: Wed, 17 Jun 2020 15:49:07 +0200
Subject: [PATCH 1/5] atpl: remove unneeded extra filter on address
 availability

This commit removes an extra filter on address availability which is not
needed as the scored nodes are an already prefiltered subset of the whole
graph where address availability has already been checked.
---
 autopilot/agent.go | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/autopilot/agent.go b/autopilot/agent.go
index 837716eb..a7e34fa9 100644
--- a/autopilot/agent.go
+++ b/autopilot/agent.go
@@ -657,17 +657,6 @@ func (a *Agent) openChans(availableFunds btcutil.Amount, numChans uint32,
 		log.Tracef("Creating attachment directive for chosen node %x",
 			nID[:])
 
-		// Add addresses to the candidates.
-		addrs := addresses[nID]
-
-		// If the node has no known addresses, we cannot connect to it,
-		// so we'll skip it.
-		if len(addrs) == 0 {
-			log.Tracef("Skipping scored node %x with no addresses",
-				nID[:])
-			continue
-		}
-
 		// Track the available funds we have left.
 		if availableFunds < chanSize {
 			chanSize = availableFunds
@@ -685,7 +674,7 @@
 		chanCandidates[nID] = &AttachmentDirective{
 			NodeID:  nID,
 			ChanAmt: chanSize,
-			Addrs:   addrs,
+			Addrs:   addresses[nID],
 		}
 	}

From 82ddccee0b25b72441d4abaaa92b85006bad48a1 Mon Sep 17 00:00:00 2001
From: Andras Banki-Horvath
Date: Wed, 17 Jun 2020 18:03:31 +0200
Subject: [PATCH 2/5] autopilot+test: make centrality test data available for
 other tests

---
 autopilot/betweenness_centrality_test.go | 68 +++---
 autopilot/centrality_testdata_test.go    | 64 ++++++++++++++++++++++++
 2 files changed, 71 insertions(+), 61 deletions(-)
 create mode 100644 autopilot/centrality_testdata_test.go

diff --git a/autopilot/betweenness_centrality_test.go b/autopilot/betweenness_centrality_test.go
index 76ece3ce..75893c38 100644
--- a/autopilot/betweenness_centrality_test.go
+++ b/autopilot/betweenness_centrality_test.go
@@ -3,9 +3,6 @@ package autopilot
 import (
 	"fmt"
 	"testing"
-
-	"github.com/btcsuite/btcd/btcec"
-	"github.com/btcsuite/btcutil"
 )
 
 func TestBetweennessCentralityMetricConstruction(t *testing.T) {
@@ -65,55 +62,8 @@ func TestBetweennessCentralityEmptyGraph(t *testing.T) {
 	}
 }
 
-// testGraphDesc is a helper type to describe a test graph.
-type testGraphDesc struct {
-	nodes int
-	edges map[int][]int
-}
-
-// buildTestGraph builds a test graph from a passed graph desriptor.
-func buildTestGraph(t *testing.T,
-	graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey {
-
-	nodes := make(map[int]*btcec.PublicKey)
-
-	for i := 0; i < desc.nodes; i++ {
-		key, err := graph.addRandNode()
-		if err != nil {
-			t.Fatalf("cannot create random node")
-		}
-
-		nodes[i] = key
-	}
-
-	const chanCapacity = btcutil.SatoshiPerBitcoin
-	for u, neighbors := range desc.edges {
-		for _, v := range neighbors {
-			_, _, err := graph.addRandChannel(nodes[u], nodes[v], chanCapacity)
-			if err != nil {
-				t.Fatalf("unexpected error adding random channel: %v", err)
-			}
-		}
-	}
-
-	return nodes
-}
-
 // Test betweenness centrality calculating using an example graph.
 func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
-	graphDesc := testGraphDesc{
-		nodes: 9,
-		edges: map[int][]int{
-			0: {1, 2, 3},
-			1: {2},
-			2: {3},
-			3: {4, 5},
-			4: {5, 6, 7},
-			5: {6, 7},
-			6: {7, 8},
-		},
-	}
-
 	workers := []int{1, 3, 9, 100}
 
 	results := []struct {
@@ -121,16 +71,12 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 		centrality []float64
 	}{
 		{
-			normalize: true,
-			centrality: []float64{
-				0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0,
-			},
+			normalize:  true,
+			centrality: normalizedTestGraphCentrality,
 		},
 		{
-			normalize: false,
-			centrality: []float64{
-				3.0, 0.0, 3.0, 15.0, 6.0, 6.0, 7.0, 0.0, 0.0,
-			},
+			normalize:  false,
+			centrality: testGraphCentrality,
 		},
 	}
 
@@ -155,7 +101,7 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 					"positive number of workers")
 			}
 
-			graphNodes := buildTestGraph(t1, graph, graphDesc)
+			graphNodes := buildTestGraph(t1, graph, centralityTestGraph)
 			if err := centralityMetric.Refresh(graph); err != nil {
 				t1.Fatalf("error while calculating betweeness centrality")
 			}
@@ -163,9 +109,9 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) {
 				expected := expected
 				centrality := centralityMetric.GetMetric(expected.normalize)
 
-				if len(centrality) != graphDesc.nodes {
+				if len(centrality) != centralityTestGraph.nodes {
 					t.Fatalf("expected %v values, got: %v",
-						graphDesc.nodes, len(centrality))
+						centralityTestGraph.nodes, len(centrality))
 				}
 
 				for node, nodeCentrality := range expected.centrality {

diff --git a/autopilot/centrality_testdata_test.go b/autopilot/centrality_testdata_test.go
new file mode 100644
index 00000000..4675fa4d
--- /dev/null
+++ b/autopilot/centrality_testdata_test.go
@@ -0,0 +1,64 @@
+package autopilot
+
+import (
+	"testing"
+
+	"github.com/btcsuite/btcd/btcec"
+	"github.com/btcsuite/btcutil"
+	"github.com/stretchr/testify/require"
+)
+
+// testGraphDesc is a helper type to describe a test graph.
+type testGraphDesc struct {
+	nodes int
+	edges map[int][]int
+}
+
+var centralityTestGraph = testGraphDesc{
+	nodes: 9,
+	edges: map[int][]int{
+		0: {1, 2, 3},
+		1: {2},
+		2: {3},
+		3: {4, 5},
+		4: {5, 6, 7},
+		5: {6, 7},
+		6: {7, 8},
+	},
+}
+
+var testGraphCentrality = []float64{
+	3.0, 0.0, 3.0, 15.0, 6.0, 6.0, 7.0, 0.0, 0.0,
+}
+
+var normalizedTestGraphCentrality = []float64{
+	0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0,
+}
+
+// buildTestGraph builds a test graph from a passed graph descriptor.
+func buildTestGraph(t *testing.T,
+	graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey {
+
+	nodes := make(map[int]*btcec.PublicKey)
+
+	for i := 0; i < desc.nodes; i++ {
+		key, err := graph.addRandNode()
+		require.NoError(t, err, "cannot create random node")
+
+		nodes[i] = key
+	}
+
+	const chanCapacity = btcutil.SatoshiPerBitcoin
+	for u, neighbors := range desc.edges {
+		for _, v := range neighbors {
+			_, _, err := graph.addRandChannel(
+				nodes[u], nodes[v], chanCapacity,
+			)
+			require.NoError(t, err,
+				"unexpected error adding random channel",
+			)
+		}
+	}
+
+	return nodes
+}

From 8373b2ad20dfb7c83b2f1c17509adef254e6f28d Mon Sep 17 00:00:00 2001
From: Andras Banki-Horvath
Date: Wed, 17 Jun 2020 18:55:28 +0200
Subject: [PATCH 3/5] autopilot: add greedy "TopK" centrality heuristic

This commit creates a new autopilot heuristic which simply returns
normalized betweenness centrality values for the current graph.

This new heuristic will make it possible to prefer nodes with large
centrality when we're trying to open channels. The heuristic is also
somewhat dumb as it doesn't try to figure out the best nodes, as that'd
require adding ghost edges to the graph and recalculating the centrality
as many times as there are nodes (minus the ones we already have channels
with).
---
 autopilot/top_centrality.go      |  93 ++++++++++++++++++++++++++
 autopilot/top_centrality_test.go | 109 +++++++++++++++++++++++++++++++
 2 files changed, 202 insertions(+)
 create mode 100644 autopilot/top_centrality.go
 create mode 100644 autopilot/top_centrality_test.go

diff --git a/autopilot/top_centrality.go b/autopilot/top_centrality.go
new file mode 100644
index 00000000..da6dcf38
--- /dev/null
+++ b/autopilot/top_centrality.go
@@ -0,0 +1,93 @@
+package autopilot
+
+import (
+	"runtime"
+
+	"github.com/btcsuite/btcutil"
+)
+
+// TopCentrality is a simple greedy technique to create connections to nodes
+// with the top betweenness centrality value. This algorithm is usually
+// referred to as TopK in the literature. The idea is that by opening channels
+// to nodes with the top betweenness centrality we also increase our own
+// betweenness centrality (given we already have at least one channel, or
+// create at least two new channels).
+// A different and much better approach is, instead of selecting nodes with
+// the top centrality value, to extend the graph in a loop by inserting a new,
+// previously non-existing edge and recalculating the betweenness centrality
+// of each node. This technique is usually referred to as the "greedy"
+// algorithm and gives better results than TopK, but is considerably slower.
+type TopCentrality struct {
+	centralityMetric *BetweennessCentrality
+}
+
+// A compile time assertion to ensure TopCentrality meets the
+// AttachmentHeuristic interface.
+var _ AttachmentHeuristic = (*TopCentrality)(nil)
+
+// NewTopCentrality constructs and returns a new TopCentrality heuristic.
+func NewTopCentrality() *TopCentrality {
+	metric, err := NewBetweennessCentralityMetric(
+		runtime.NumCPU(),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	return &TopCentrality{
+		centralityMetric: metric,
+	}
+}
+
+// Name returns the name of the heuristic.
+func (g *TopCentrality) Name() string {
+	return "top_centrality"
+}
+
+// NodeScores will return a [0,1] normalized map of scores for the given nodes
+// except for the ones we already have channels with. The scores will simply
+// be the betweenness centrality values of the nodes.
+// As our current implementation of betweenness centrality is non-incremental,
+// NodeScores will recalculate the centrality values on every call, which is
+// slow for large graphs.
+func (g *TopCentrality) NodeScores(graph ChannelGraph, chans []Channel,
+	chanSize btcutil.Amount, nodes map[NodeID]struct{}) (
+	map[NodeID]*NodeScore, error) {
+
+	// Calculate betweenness centrality for the whole graph.
+	if err := g.centralityMetric.Refresh(graph); err != nil {
+		return nil, err
+	}
+
+	normalize := true
+	centrality := g.centralityMetric.GetMetric(normalize)
+
+	// Create a map of the existing peers for faster filtering.
+	existingPeers := make(map[NodeID]struct{})
+	for _, c := range chans {
+		existingPeers[c.Node] = struct{}{}
+	}
+
+	result := make(map[NodeID]*NodeScore, len(nodes))
+	for nodeID := range nodes {
+		// Skip nodes we already have a channel with.
+		if _, ok := existingPeers[nodeID]; ok {
+			continue
+		}
+
+		// Skip passed nodes not in the graph. This could happen if
+		// the graph changed before computing the centrality values as
+		// the nodes we iterate are prefiltered by the autopilot agent.
+		score, ok := centrality[nodeID]
+		if !ok {
+			continue
+		}
+
+		result[nodeID] = &NodeScore{
+			NodeID: nodeID,
+			Score:  score,
+		}
+	}
+
+	return result, nil
+}
diff --git a/autopilot/top_centrality_test.go b/autopilot/top_centrality_test.go
new file mode 100644
index 00000000..6069200c
--- /dev/null
+++ b/autopilot/top_centrality_test.go
@@ -0,0 +1,109 @@
+package autopilot
+
+import (
+	"testing"
+
+	"github.com/btcsuite/btcd/btcec"
+	"github.com/btcsuite/btcutil"
+	"github.com/stretchr/testify/require"
+)
+
+// testTopCentrality is a subtest helper which, given the passed graph and
+// channels, creates the expected centrality score set and checks that the
+// calculated score set matches it.
+func testTopCentrality(t *testing.T, graph testGraph,
+	graphNodes map[int]*btcec.PublicKey, channelsWith []int) {
+
+	topCentrality := NewTopCentrality()
+
+	var channels []Channel
+	for _, ch := range channelsWith {
+		channels = append(channels, Channel{
+			Node: NewNodeID(graphNodes[ch]),
+		})
+	}
+
+	// Start iteration from -1 to also test the case where the node set
+	// is empty.
+	for i := -1; i < len(graphNodes); i++ {
+		nodes := make(map[NodeID]struct{})
+		expected := make(map[NodeID]*NodeScore)
+
+		for j := 0; j <= i; j++ {
+			// Add node to the interest set.
+			nodeID := NewNodeID(graphNodes[j])
+			nodes[nodeID] = struct{}{}
+
+			// Add to the expected set unless it's a node we have
+			// a channel with.
+			haveChannel := false
+			for _, ch := range channels {
+				if nodeID == ch.Node {
+					haveChannel = true
+					break
+				}
+			}
+
+			if !haveChannel {
+				score := normalizedTestGraphCentrality[j]
+				expected[nodeID] = &NodeScore{
+					NodeID: nodeID,
+					Score:  score,
+				}
+			}
+		}
+
+		const chanSize = btcutil.SatoshiPerBitcoin
+
+		// Attempt to get the centrality scores and expect
+		// that the result equals the expected set.
+		scores, err := topCentrality.NodeScores(
+			graph, channels, chanSize, nodes,
+		)
+
+		require.NoError(t, err)
+		require.Equal(t, expected, scores)
+	}
+}
+
+// TestTopCentrality tests that we return the correct normalized centrality
+// values given a non-empty graph, and given our node has an increasing number
+// of channels from 0 to N-1, simulating the whole range from non-connected to
+// fully connected.
+func TestTopCentrality(t *testing.T) {
+	// Generate channels: {}, {0}, {0, 1}, ... {0, 1, ..., N-1}
+	channelsWith := [][]int{nil}
+
+	for i := 0; i < centralityTestGraph.nodes; i++ {
+		channels := make([]int, i+1)
+		for j := 0; j <= i; j++ {
+			channels[j] = j
+		}
+		channelsWith = append(channelsWith, channels)
+	}
+
+	for _, chanGraph := range chanGraphs {
+		chanGraph := chanGraph
+
+		success := t.Run(chanGraph.name, func(t *testing.T) {
+			t.Parallel()
+
+			graph, cleanup, err := chanGraph.genFunc()
+			require.NoError(t, err, "unable to create graph")
+			if cleanup != nil {
+				defer cleanup()
+			}
+
+			// Build the test graph.
+ graphNodes := buildTestGraph( + t, graph, centralityTestGraph, + ) + + for _, chans := range channelsWith { + testTopCentrality(t, graph, graphNodes, chans) + } + }) + + require.True(t, success) + } +} From ccabad86072ed01d0e901a571a3990581c435cfe Mon Sep 17 00:00:00 2001 From: Andras Banki-Horvath Date: Wed, 17 Jun 2020 19:13:08 +0200 Subject: [PATCH 4/5] autopilot: add TopCentrality to the available heuristics --- autopilot/interface.go | 1 + 1 file changed, 1 insertion(+) diff --git a/autopilot/interface.go b/autopilot/interface.go index 7efeba76..21c51c9f 100644 --- a/autopilot/interface.go +++ b/autopilot/interface.go @@ -185,6 +185,7 @@ var ( availableHeuristics = []AttachmentHeuristic{ NewPrefAttachment(), NewExternalScoreAttachment(), + NewTopCentrality(), } // AvailableHeuristics is a map that holds the name of available From afbbeae4f2d1434fff597d2c9ccafc2b32500733 Mon Sep 17 00:00:00 2001 From: Andras Banki-Horvath Date: Wed, 17 Jun 2020 19:23:42 +0200 Subject: [PATCH 5/5] autopilot+test: testify betweenness centrality tests The commit also reindents the source to conform with ts=8 guideline. --- autopilot/betweenness_centrality_test.go | 110 +++++++++++------------ 1 file changed, 55 insertions(+), 55 deletions(-) diff --git a/autopilot/betweenness_centrality_test.go b/autopilot/betweenness_centrality_test.go index 75893c38..1efd005f 100644 --- a/autopilot/betweenness_centrality_test.go +++ b/autopilot/betweenness_centrality_test.go @@ -3,6 +3,8 @@ package autopilot import ( "fmt" "testing" + + "github.com/stretchr/testify/require" ) func TestBetweennessCentralityMetricConstruction(t *testing.T) { @@ -11,50 +13,46 @@ func TestBetweennessCentralityMetricConstruction(t *testing.T) { for _, workers := range failing { m, err := NewBetweennessCentralityMetric(workers) - if m != nil || err == nil { - t.Fatalf("construction must fail with <= 0 workers") - } + require.Error( + t, err, "construction must fail with <= 0 workers", + ) + require.Nil(t, m) } for _, workers := range ok { m, err := NewBetweennessCentralityMetric(workers) - if m == nil || err != nil { - t.Fatalf("construction must succeed with >= 1 workers") - } + require.NoError( + t, err, "construction must succeed with >= 1 workers", + ) + require.NotNil(t, m) } } // Tests that empty graph results in empty centrality result. 
func TestBetweennessCentralityEmptyGraph(t *testing.T) { centralityMetric, err := NewBetweennessCentralityMetric(1) - if err != nil { - t.Fatalf("construction must succeed with positive number of workers") - } + require.NoError( + t, err, + "construction must succeed with positive number of workers", + ) for _, chanGraph := range chanGraphs { graph, cleanup, err := chanGraph.genFunc() success := t.Run(chanGraph.name, func(t1 *testing.T) { - if err != nil { - t1.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") + if cleanup != nil { defer cleanup() } - if err := centralityMetric.Refresh(graph); err != nil { - t.Fatalf("unexpected failure during metric refresh: %v", err) - } + err := centralityMetric.Refresh(graph) + require.NoError(t, err) centrality := centralityMetric.GetMetric(false) - if len(centrality) > 0 { - t.Fatalf("expected empty metric, got: %v", len(centrality)) - } + require.Equal(t, 0, len(centrality)) centrality = centralityMetric.GetMetric(true) - if len(centrality) > 0 { - t.Fatalf("expected empty metric, got: %v", len(centrality)) - } - + require.Equal(t, 0, len(centrality)) }) if !success { break @@ -66,7 +64,7 @@ func TestBetweennessCentralityEmptyGraph(t *testing.T) { func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) { workers := []int{1, 3, 9, 100} - results := []struct { + tests := []struct { normalize bool centrality []float64 }{ @@ -84,49 +82,51 @@ func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) { for _, chanGraph := range chanGraphs { numWorkers := numWorkers graph, cleanup, err := chanGraph.genFunc() - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } + require.NoError(t, err, "unable to create graph") + if cleanup != nil { defer cleanup() } - testName := fmt.Sprintf("%v %d workers", chanGraph.name, numWorkers) + testName := fmt.Sprintf( + "%v %d workers", chanGraph.name, numWorkers, + ) + success := t.Run(testName, func(t1 *testing.T) { - centralityMetric, err := NewBetweennessCentralityMetric( + metric, err := NewBetweennessCentralityMetric( numWorkers, ) - if err != nil { - t.Fatalf("construction must succeed with " + - "positive number of workers") - } + require.NoError( + t, err, + "construction must succeed with "+ + "positive number of workers", + ) - graphNodes := buildTestGraph(t1, graph, centralityTestGraph) - if err := centralityMetric.Refresh(graph); err != nil { - t1.Fatalf("error while calculating betweeness centrality") - } - for _, expected := range results { + graphNodes := buildTestGraph( + t1, graph, centralityTestGraph, + ) + + err = metric.Refresh(graph) + require.NoError(t, err) + + for _, expected := range tests { expected := expected - centrality := centralityMetric.GetMetric(expected.normalize) + centrality := metric.GetMetric( + expected.normalize, + ) - if len(centrality) != centralityTestGraph.nodes { - t.Fatalf("expected %v values, got: %v", - centralityTestGraph.nodes, len(centrality)) - } + require.Equal(t, + centralityTestGraph.nodes, + len(centrality), + ) - for node, nodeCentrality := range expected.centrality { - nodeID := NewNodeID(graphNodes[node]) - calculatedCentrality, ok := centrality[nodeID] - if !ok { - t1.Fatalf("no result for node: %x (%v)", - nodeID, node) - } - - if nodeCentrality != calculatedCentrality { - t1.Errorf("centrality for node: %v "+ - "should be %v, got: %v", - node, nodeCentrality, calculatedCentrality) - } + for i, c := range expected.centrality { + nodeID := NewNodeID( + graphNodes[i], + ) + result, ok := 
centrality[nodeID] + require.True(t, ok) + require.Equal(t, c, result) } } })
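
For reference, the sketch below is an editor's illustration and not part of the
patch series. Assuming it is dropped into the autopilot package next to the
helpers introduced above (chanGraphs, buildTestGraph, centralityTestGraph), it
shows how the pieces fit together: build the shared 9-node test graph, score
every node with the new TopCentrality heuristic via NodeScores, and check that
the most central node (node 3) receives the maximum normalized score of 1.0.
The helper name sketchTopCentralityUsage and the use of chanGraphs[0] are
illustrative assumptions, not code from the series.

package autopilot

import (
	"testing"

	"github.com/btcsuite/btcutil"
	"github.com/stretchr/testify/require"
)

// sketchTopCentralityUsage is a hypothetical helper (not part of the patches
// above) that exercises the TopCentrality heuristic end to end against the
// shared test topology.
func sketchTopCentralityUsage(t *testing.T) {
	// Use the first of the in-memory test graph implementations already
	// available to the autopilot tests.
	graph, cleanup, err := chanGraphs[0].genFunc()
	require.NoError(t, err)
	if cleanup != nil {
		defer cleanup()
	}

	// Populate it with the shared test topology added in patch 2.
	graphNodes := buildTestGraph(t, graph, centralityTestGraph)

	// Ask for a score for every node; passing no channels means no
	// candidate is filtered out as an existing peer.
	nodes := make(map[NodeID]struct{})
	for _, pub := range graphNodes {
		nodes[NewNodeID(pub)] = struct{}{}
	}

	heuristic := NewTopCentrality()
	scores, err := heuristic.NodeScores(
		graph, nil, btcutil.SatoshiPerBitcoin, nodes,
	)
	require.NoError(t, err)

	// Node 3 is the most central node of the test topology, so its
	// normalized score should be the maximum of 1.0.
	require.Equal(t, 1.0, scores[NewNodeID(graphNodes[3])].Score)
}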