package channeldb

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"image/color"
	"math"
	"math/big"
	prand "math/rand"
	"net"
	"reflect"
	"runtime"
	"testing"
	"time"

	"github.com/boltdb/bolt"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/roasbeef/btcd/btcec"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcd/wire"
)

var (
	testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
		Port: 9000}
	anotherAddr, _ = net.ResolveTCPAddr("tcp",
		"[2001:db8:85a3:0:0:8a2e:370:7334]:80")
	testAddrs = []net.Addr{testAddr, anotherAddr}

	randSource = prand.NewSource(time.Now().Unix())
	randInts   = prand.New(randSource)
	testSig    = &btcec.Signature{
		R: new(big.Int),
		S: new(big.Int),
	}
	_, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
	_, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)

	testFeatures = lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures)
)

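// createTestVertex returns a LightningNode, backed by the passed database,
// that is populated with random test data.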
func createTestVertex(db *DB) (*LightningNode, error) {
	updateTime := prand.Int63()

	priv, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		return nil, err
	}

	pub := priv.PubKey().SerializeCompressed()
	return &LightningNode{
		HaveNodeAnnouncement: true,
		AuthSig:              testSig,
		LastUpdate:           time.Unix(updateTime, 0),
		PubKey:               priv.PubKey(),
		Color:                color.RGBA{1, 2, 3, 0},
		Alias:                "kek" + string(pub[:]),
		Features:             testFeatures,
		Addresses:            testAddrs,
		db:                   db,
	}, nil
}

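// TestNodeInsertionAndDeletion ensures that a LightningNode can be inserted
// into the graph, fetched back intact, and then fully deleted again.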
func TestNodeInsertionAndDeletion(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test basic insertion/deletion for vertexes from the
	// graph, so we'll create a test vertex to start with.
	_, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:])
	node := &LightningNode{
		HaveNodeAnnouncement: true,
		AuthSig:              testSig,
		LastUpdate:           time.Unix(1232342, 0),
		PubKey:               testPub,
		Color:                color.RGBA{1, 2, 3, 0},
		Alias:                "kek",
		Features:             testFeatures,
		Addresses:            testAddrs,
		db:                   db,
	}

	// First, insert the node into the graph DB. This should succeed
	// without any errors.
	if err := graph.AddLightningNode(node); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// Next, fetch the node from the database to ensure everything was
	// serialized properly.
	dbNode, err := graph.FetchLightningNode(testPub)
	if err != nil {
		t.Fatalf("unable to locate node: %v", err)
	}

	if _, exists, err := graph.HasLightningNode(testPub); err != nil {
		t.Fatalf("unable to query for node: %v", err)
	} else if !exists {
		t.Fatalf("node should be found but wasn't")
	}

	// The two nodes should match exactly!
	if err := compareNodes(node, dbNode); err != nil {
		t.Fatalf("nodes don't match: %v", err)
	}

	// Next, delete the node from the graph. This should purge all data
	// related to the node.
	if err := graph.DeleteLightningNode(testPub); err != nil {
		t.Fatalf("unable to delete node: %v", err)
	}

	// Finally, attempt to fetch the node again. This should fail as the
	// node should've been deleted from the database.
	_, err = graph.FetchLightningNode(testPub)
	if err != ErrGraphNodeNotFound {
		t.Fatalf("fetch after delete should fail!")
	}
}

// TestPartialNode checks that we can add and retrieve a LightningNode for
// which only the pubkey is known to the database.
func TestPartialNode(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We want to be able to insert nodes into the graph that only have
	// the PubKey set.
	_, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:])
	node := &LightningNode{
		PubKey:               testPub,
		HaveNodeAnnouncement: false,
	}

	if err := graph.AddLightningNode(node); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// Next, fetch the node from the database to ensure everything was
	// serialized properly.
	dbNode, err := graph.FetchLightningNode(testPub)
	if err != nil {
		t.Fatalf("unable to locate node: %v", err)
	}

	if _, exists, err := graph.HasLightningNode(testPub); err != nil {
		t.Fatalf("unable to query for node: %v", err)
	} else if !exists {
		t.Fatalf("node should be found but wasn't")
	}

	// The two nodes should match exactly! (with default values for
	// LastUpdate and db set to satisfy compareNodes())
	node = &LightningNode{
		PubKey:               testPub,
		HaveNodeAnnouncement: false,
		LastUpdate:           time.Unix(0, 0),
		db:                   db,
	}

	if err := compareNodes(node, dbNode); err != nil {
		t.Fatalf("nodes don't match: %v", err)
	}

	// Next, delete the node from the graph. This should purge all data
	// related to the node.
	if err := graph.DeleteLightningNode(testPub); err != nil {
		t.Fatalf("unable to delete node: %v", err)
	}

	// Finally, attempt to fetch the node again. This should fail as the
	// node should've been deleted from the database.
	_, err = graph.FetchLightningNode(testPub)
	if err != ErrGraphNodeNotFound {
		t.Fatalf("fetch after delete should fail!")
	}
}

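// TestAliasLookup verifies that node aliases are indexed on insertion and can
// be looked up by public key, and that a lookup for an unknown key returns
// ErrNodeAliasNotFound.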
func TestAliasLookup(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test the alias index within the database, so first
	// create a new test node.
	testNode, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}

	// Add the node to the graph's database, this should also insert an
	// entry into the alias index for this node.
	if err := graph.AddLightningNode(testNode); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	// Next, attempt to lookup the alias. The alias should exactly match
	// the one which the test node was assigned.
	dbAlias, err := graph.LookupAlias(testNode.PubKey)
	if err != nil {
		t.Fatalf("unable to find alias: %v", err)
	}
	if dbAlias != testNode.Alias {
		t.Fatalf("aliases don't match, expected %v got %v",
			testNode.Alias, dbAlias)
	}

	// Ensure that looking up a non-existent alias results in an error.
	node, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}
	_, err = graph.LookupAlias(node.PubKey)
	if err != ErrNodeAliasNotFound {
		t.Fatalf("alias lookup should fail for non-existent pubkey")
	}
}

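// TestSourceNode tests that the source node of the graph can be set and
// retrieved, and that fetching it before it has been set returns
// ErrSourceNodeNotSet.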
func TestSourceNode(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test the setting/getting of the source node, so we
	// first create a fake node to use within the test.
	testNode, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}

	// Attempt to fetch the source node, this should return an error as the
	// source node hasn't yet been set.
	if _, err := graph.SourceNode(); err != ErrSourceNodeNotSet {
		t.Fatalf("source node shouldn't be set in new graph")
	}

	// Set the source node, this should insert the node into the
	// database in a special way indicating it's the source node.
	if err := graph.SetSourceNode(testNode); err != nil {
		t.Fatalf("unable to set source node: %v", err)
	}

	// Retrieve the source node from the database, it should exactly match
	// the one we set above.
	sourceNode, err := graph.SourceNode()
	if err != nil {
		t.Fatalf("unable to fetch source node: %v", err)
	}
	if err := compareNodes(testNode, sourceNode); err != nil {
		t.Fatalf("nodes don't match: %v", err)
	}
}

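// TestEdgeInsertionDeletion asserts that a channel edge can be inserted into
// and deleted from the graph, and that deleting a non-existent edge returns
// ErrEdgeNotFound.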
func TestEdgeInsertionDeletion(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test the insertion/deletion of edges, so we create two
	// vertexes to connect.
	node1, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}
	node2, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}

	// In addition to the fake vertexes we create some fake channel
	// identifiers.
	chanID := uint64(prand.Int63())
	outpoint := wire.OutPoint{
		Hash:  rev,
		Index: 9,
	}

	// Add the new edge to the database, this should proceed without any
	// errors.
	edgeInfo := ChannelEdgeInfo{
		ChannelID:   chanID,
		ChainHash:   key,
		NodeKey1:    node1.PubKey,
		NodeKey2:    node2.PubKey,
		BitcoinKey1: node1.PubKey,
		BitcoinKey2: node2.PubKey,
		AuthProof: &ChannelAuthProof{
			NodeSig1:    testSig,
			NodeSig2:    testSig,
			BitcoinSig1: testSig,
			BitcoinSig2: testSig,
		},
		ChannelPoint: outpoint,
		Capacity:     9000,
	}

	if err := graph.AddChannelEdge(&edgeInfo); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	// Next, attempt to delete the edge from the database, again this
	// should proceed without any issues.
	if err := graph.DeleteChannelEdge(&outpoint); err != nil {
		t.Fatalf("unable to delete edge: %v", err)
	}

	// Ensure that any query attempting to look up the deleted channel
	// edge now fails.
	if _, _, _, err := graph.FetchChannelEdgesByOutpoint(&outpoint); err == nil {
		t.Fatalf("channel edge not deleted")
	}
	if _, _, _, err := graph.FetchChannelEdgesByID(chanID); err == nil {
		t.Fatalf("channel edge not deleted")
	}

	// Finally, attempt to delete a (now) non-existent edge within the
	// database, this should result in an error.
	err = graph.DeleteChannelEdge(&outpoint)
	if err != ErrEdgeNotFound {
		t.Fatalf("deleting a non-existent edge should fail!")
	}
}

// TestDisconnectBlockAtHeight checks that the pruned state of the channel
// database is what we expect after calling DisconnectBlockAtHeight.
func TestDisconnectBlockAtHeight(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test the insertion/deletion of edges, so we create two
	// vertexes to connect.
	node1, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}
	node2, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}

	// In addition to the fake vertexes we create some fake channel
	// identifiers.
	var spendOutputs []*wire.OutPoint
	var blockHash chainhash.Hash
	copy(blockHash[:], bytes.Repeat([]byte{1}, 32))

	// Prune the graph a few times to make sure we have entries in the
	// prune log.
	_, err = graph.PruneGraph(spendOutputs, &blockHash, 155)
	if err != nil {
		t.Fatalf("unable to prune graph: %v", err)
	}
	var blockHash2 chainhash.Hash
	copy(blockHash2[:], bytes.Repeat([]byte{2}, 32))

	_, err = graph.PruneGraph(spendOutputs, &blockHash2, 156)
	if err != nil {
		t.Fatalf("unable to prune graph: %v", err)
	}

	// We'll create 3 almost identical edges, so first create a helper
	// method containing all logic for doing so.
	createEdge := func(height uint32, txIndex uint32, txPosition uint16,
		outPointIndex uint32) ChannelEdgeInfo {
		shortChanID := lnwire.ShortChannelID{
			BlockHeight: height,
			TxIndex:     txIndex,
			TxPosition:  txPosition,
		}
		outpoint := wire.OutPoint{
			Hash:  rev,
			Index: outPointIndex,
		}

		edgeInfo := ChannelEdgeInfo{
			ChannelID:   shortChanID.ToUint64(),
			ChainHash:   key,
			NodeKey1:    node1.PubKey,
			NodeKey2:    node2.PubKey,
			BitcoinKey1: node1.PubKey,
			BitcoinKey2: node2.PubKey,
			AuthProof: &ChannelAuthProof{
				NodeSig1:    testSig,
				NodeSig2:    testSig,
				BitcoinSig1: testSig,
				BitcoinSig2: testSig,
			},
			ChannelPoint: outpoint,
			Capacity:     9000,
		}
		return edgeInfo
	}

	// Create an edge which has its block height at 156.
	height := uint32(156)
	edgeInfo := createEdge(height, 0, 0, 0)

	// Create an edge with block height 157. We give it maximum values for
	// the tx index and position, to make sure our database range scan
	// gets edges from the entire range.
	edgeInfo2 := createEdge(height+1, math.MaxUint32&0x00ffffff,
		math.MaxUint16, 1)

	// Create a third edge, this one with a block height of 155.
	edgeInfo3 := createEdge(height-1, 0, 0, 2)

	// Now add all these new edges to the database.
	if err := graph.AddChannelEdge(&edgeInfo); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	if err := graph.AddChannelEdge(&edgeInfo2); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	if err := graph.AddChannelEdge(&edgeInfo3); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	// Call DisconnectBlockAtHeight, which should prune every channel
	// that has a funding height of 'height' or greater.
	removed, err := graph.DisconnectBlockAtHeight(uint32(height))
	if err != nil {
		t.Fatalf("unable to prune: %v", err)
	}

	// The two edges should have been removed.
	if len(removed) != 2 {
		t.Fatalf("expected two edges to be removed from graph, "+
			"only %d were", len(removed))
	}
	if removed[0].ChannelID != edgeInfo.ChannelID {
		t.Fatalf("expected edge to be removed from graph")
	}
	if removed[1].ChannelID != edgeInfo2.ChannelID {
		t.Fatalf("expected edge to be removed from graph")
	}

	// The two first edges should be removed from the db.
	_, _, has, err := graph.HasChannelEdge(edgeInfo.ChannelID)
	if err != nil {
		t.Fatalf("unable to query for edge: %v", err)
	}
	if has {
		t.Fatalf("edge1 was not pruned from the graph")
	}
	_, _, has, err = graph.HasChannelEdge(edgeInfo2.ChannelID)
	if err != nil {
		t.Fatalf("unable to query for edge: %v", err)
	}
	if has {
		t.Fatalf("edge2 was not pruned from the graph")
	}

	// Edge 3 should not be removed.
	_, _, has, err = graph.HasChannelEdge(edgeInfo3.ChannelID)
	if err != nil {
		t.Fatalf("unable to query for edge: %v", err)
	}
	if !has {
		t.Fatalf("edge3 was pruned from the graph")
	}

	// PruneTip should be set to the blockHash we specified for the block
	// at height 155.
	hash, h, err := graph.PruneTip()
	if err != nil {
		t.Fatalf("unable to get prune tip: %v", err)
	}
	if !blockHash.IsEqual(hash) {
		t.Fatalf("expected best block to be %x, was %x", blockHash, hash)
	}
	if h != height-1 {
		t.Fatalf("expected best block height to be %d, was %d", height-1, h)
	}
}

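// assertEdgeInfoEqual fails the test if any of the fields of the two
// ChannelEdgeInfo structs differ.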
func assertEdgeInfoEqual(t *testing.T, e1 *ChannelEdgeInfo,
	e2 *ChannelEdgeInfo) {

	if e1.ChannelID != e2.ChannelID {
		t.Fatalf("chan id's don't match: %v vs %v", e1.ChannelID,
			e2.ChannelID)
	}

	if e1.ChainHash != e2.ChainHash {
		t.Fatalf("chain hashes don't match: %v vs %v", e1.ChainHash,
			e2.ChainHash)
	}

	if !e1.NodeKey1.IsEqual(e2.NodeKey1) {
		t.Fatalf("nodekey1 doesn't match")
	}
	if !e1.NodeKey2.IsEqual(e2.NodeKey2) {
		t.Fatalf("nodekey2 doesn't match")
	}
	if !e1.BitcoinKey1.IsEqual(e2.BitcoinKey1) {
		t.Fatalf("bitcoinkey1 doesn't match")
	}
	if !e1.BitcoinKey2.IsEqual(e2.BitcoinKey2) {
		t.Fatalf("bitcoinkey2 doesn't match")
	}

	if !bytes.Equal(e1.Features, e2.Features) {
		t.Fatalf("features don't match: %x vs %x", e1.Features,
			e2.Features)
	}

	if !e1.AuthProof.NodeSig1.IsEqual(e2.AuthProof.NodeSig1) {
		t.Fatalf("nodesig1 doesn't match: %v vs %v",
			spew.Sdump(e1.AuthProof.NodeSig1),
			spew.Sdump(e2.AuthProof.NodeSig1))
	}
	if !e1.AuthProof.NodeSig2.IsEqual(e2.AuthProof.NodeSig2) {
		t.Fatalf("nodesig2 doesn't match")
	}
	if !e1.AuthProof.BitcoinSig1.IsEqual(e2.AuthProof.BitcoinSig1) {
		t.Fatalf("bitcoinsig1 doesn't match")
	}
	if !e1.AuthProof.BitcoinSig2.IsEqual(e2.AuthProof.BitcoinSig2) {
		t.Fatalf("bitcoinsig2 doesn't match")
	}

	if e1.ChannelPoint != e2.ChannelPoint {
		t.Fatalf("channel points don't match: %v vs %v", e1.ChannelPoint,
			e2.ChannelPoint)
	}

	if e1.Capacity != e2.Capacity {
		t.Fatalf("capacity doesn't match: %v vs %v", e1.Capacity,
			e2.Capacity)
	}
}

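// TestEdgeInfoUpdates tests that the policies of a channel edge can be
// updated for both directions, and that the edge info along with both
// policies can be fetched back by channel ID and by outpoint.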
func TestEdgeInfoUpdates(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test the update of edges inserted into the database, so
	// we create two vertexes to connect.
	node1, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}
	if err := graph.AddLightningNode(node1); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}
	node2, err := createTestVertex(db)
	if err != nil {
		t.Fatalf("unable to create test node: %v", err)
	}
	if err := graph.AddLightningNode(node2); err != nil {
		t.Fatalf("unable to add node: %v", err)
	}

	var (
		firstNode  *LightningNode
		secondNode *LightningNode
	)
	node1Bytes := node1.PubKey.SerializeCompressed()
	node2Bytes := node2.PubKey.SerializeCompressed()
	if bytes.Compare(node1Bytes, node2Bytes) == -1 {
		firstNode = node1
		secondNode = node2
	} else {
		firstNode = node2
		secondNode = node1
	}

	// In addition to the fake vertexes we create some fake channel
	// identifiers.
	chanID := uint64(prand.Int63())
	outpoint := wire.OutPoint{
		Hash:  rev,
		Index: 9,
	}

	// Add the new edge to the database, this should proceed without any
	// errors.
	edgeInfo := &ChannelEdgeInfo{
		ChannelID:   chanID,
		ChainHash:   key,
		NodeKey1:    firstNode.PubKey,
		NodeKey2:    secondNode.PubKey,
		BitcoinKey1: firstNode.PubKey,
		BitcoinKey2: secondNode.PubKey,
		AuthProof: &ChannelAuthProof{
			NodeSig1:    testSig,
			NodeSig2:    testSig,
			BitcoinSig1: testSig,
			BitcoinSig2: testSig,
		},
		ChannelPoint: outpoint,
		Capacity:     1000,
	}
	if err := graph.AddChannelEdge(edgeInfo); err != nil {
		t.Fatalf("unable to create channel edge: %v", err)
	}

	// With the edge added, we can now create some fake edge information to
	// update for both edges.
	edge1 := &ChannelEdgePolicy{
		Signature:                 testSig,
		ChannelID:                 chanID,
		LastUpdate:                time.Unix(433453, 0),
		Flags:                     0,
		TimeLockDelta:             99,
		MinHTLC:                   2342135,
		FeeBaseMSat:               4352345,
		FeeProportionalMillionths: 3452352,
		Node: secondNode,
		db:   db,
	}
	edge2 := &ChannelEdgePolicy{
		Signature:                 testSig,
		ChannelID:                 chanID,
		LastUpdate:                time.Unix(124234, 0),
		Flags:                     1,
		TimeLockDelta:             99,
		MinHTLC:                   2342135,
		FeeBaseMSat:               4352345,
		FeeProportionalMillionths: 90392423,
		Node: firstNode,
		db:   db,
	}

	// Next, insert both edge policies into the database, they should both
	// be inserted without any issues.
	if err := graph.UpdateEdgePolicy(edge1); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}
	if err := graph.UpdateEdgePolicy(edge2); err != nil {
		t.Fatalf("unable to update edge: %v", err)
	}

	// Check for existence of the edge within the database, it should be
	// found.
	_, _, found, err := graph.HasChannelEdge(chanID)
	if err != nil {
		t.Fatalf("unable to query for edge: %v", err)
	} else if !found {
		t.Fatalf("graph should have the inserted edge")
	}

	// We should also be able to retrieve the channelID only knowing the
	// channel point of the channel.
	dbChanID, err := graph.ChannelID(&outpoint)
	if err != nil {
		t.Fatalf("unable to retrieve channel ID: %v", err)
	}
	if dbChanID != chanID {
		t.Fatalf("chan ID's mismatch, expected %v got %v", chanID,
			dbChanID)
	}

	// With the edges inserted, perform some queries to ensure that they've
	// been inserted properly.
	dbEdgeInfo, dbEdge1, dbEdge2, err := graph.FetchChannelEdgesByID(chanID)
	if err != nil {
		t.Fatalf("unable to fetch channel by ID: %v", err)
	}
	if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)

	// Next, attempt to query the channel edges according to the outpoint
	// of the channel.
	dbEdgeInfo, dbEdge1, dbEdge2, err = graph.FetchChannelEdgesByOutpoint(&outpoint)
	if err != nil {
		t.Fatalf("unable to fetch channel by outpoint: %v", err)
	}
	if err := compareEdgePolicies(dbEdge1, edge1); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	if err := compareEdgePolicies(dbEdge2, edge2); err != nil {
		t.Fatalf("edge doesn't match: %v", err)
	}
	assertEdgeInfoEqual(t, dbEdgeInfo, edgeInfo)
}

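// randEdgePolicy returns a ChannelEdgePolicy for the passed channel ID that
// is populated with random test data.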
func randEdgePolicy(chanID uint64, op wire.OutPoint, db *DB) *ChannelEdgePolicy {
	update := prand.Int63()

	return &ChannelEdgePolicy{
		ChannelID:                 chanID,
		LastUpdate:                time.Unix(update, 0),
		TimeLockDelta:             uint16(prand.Int63()),
		MinHTLC:                   lnwire.MilliSatoshi(prand.Int63()),
		FeeBaseMSat:               lnwire.MilliSatoshi(prand.Int63()),
		FeeProportionalMillionths: lnwire.MilliSatoshi(prand.Int63()),
		db: db,
	}
}

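// TestGraphTraversal tests the ForEachNode and ForEachChannel traversal
// primitives of the channel graph, as well as iteration over the channels of
// a single node.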
func TestGraphTraversal(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// We'd like to test some of the graph traversal capabilities within
	// the DB, so we'll create a series of fake nodes to insert into the
	// graph.
	const numNodes = 20
	nodes := make([]*LightningNode, numNodes)
	nodeIndex := map[string]struct{}{}
	for i := 0; i < numNodes; i++ {
		node, err := createTestVertex(db)
		if err != nil {
			t.Fatalf("unable to create node: %v", err)
		}

		nodes[i] = node
		nodeIndex[node.Alias] = struct{}{}
	}

	// Add each of the nodes into the graph, they should be inserted
	// without error.
	for _, node := range nodes {
		if err := graph.AddLightningNode(node); err != nil {
			t.Fatalf("unable to add node: %v", err)
		}
	}

	// Iterate over each node as returned by the graph, if all nodes are
	// reached, then the map created above should be empty.
	err = graph.ForEachNode(nil, func(_ *bolt.Tx, node *LightningNode) error {
		delete(nodeIndex, node.Alias)
		return nil
	})
	if err != nil {
		t.Fatalf("for each failure: %v", err)
	}
	if len(nodeIndex) != 0 {
		t.Fatalf("all nodes not reached within ForEach")
	}

	// Determine which node is "smaller", we'll need this in order to
	// properly create the edges for the graph.
	var firstNode, secondNode *LightningNode
	node1Bytes := nodes[0].PubKey.SerializeCompressed()
	node2Bytes := nodes[1].PubKey.SerializeCompressed()
	if bytes.Compare(node1Bytes, node2Bytes) == -1 {
		firstNode = nodes[0]
		secondNode = nodes[1]
	} else {
		firstNode = nodes[1]
		secondNode = nodes[0]
	}

	// Create 5 channels between the first two nodes we generated above.
	const numChannels = 5
	chanIndex := map[uint64]struct{}{}
	for i := 0; i < numChannels; i++ {
		txHash := sha256.Sum256([]byte{byte(i)})
		chanID := uint64(i + 1)
		op := wire.OutPoint{
			Hash:  txHash,
			Index: 0,
		}

		edgeInfo := ChannelEdgeInfo{
			ChannelID:   chanID,
			ChainHash:   key,
			NodeKey1:    firstNode.PubKey,
			NodeKey2:    secondNode.PubKey,
			BitcoinKey1: firstNode.PubKey,
			BitcoinKey2: secondNode.PubKey,
			AuthProof: &ChannelAuthProof{
				NodeSig1:    testSig,
				NodeSig2:    testSig,
				BitcoinSig1: testSig,
				BitcoinSig2: testSig,
			},
			ChannelPoint: op,
			Capacity:     1000,
		}
		err := graph.AddChannelEdge(&edgeInfo)
		if err != nil {
			t.Fatalf("unable to add channel edge: %v", err)
		}

		// Create and add an edge with random data that points from
		// node1 -> node2.
		edge := randEdgePolicy(chanID, op, db)
		edge.Flags = 0
		edge.Node = secondNode
		edge.Signature = testSig
		if err := graph.UpdateEdgePolicy(edge); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		// Create another random edge that points from node2 -> node1
		// this time.
		edge = randEdgePolicy(chanID, op, db)
		edge.Flags = 1
		edge.Node = firstNode
		edge.Signature = testSig
		if err := graph.UpdateEdgePolicy(edge); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		chanIndex[chanID] = struct{}{}
	}

	// Iterate through all the known channels within the graph DB, once
	// again if the map is empty then that indicates that all edges have
	// properly been reached.
	err = graph.ForEachChannel(func(ei *ChannelEdgeInfo, _ *ChannelEdgePolicy,
		_ *ChannelEdgePolicy) error {

		delete(chanIndex, ei.ChannelID)
		return nil
	})
	if err != nil {
		t.Fatalf("for each failure: %v", err)
	}
	if len(chanIndex) != 0 {
		t.Fatalf("all edges not reached within ForEach")
	}

	// Finally, we want to test the ability to iterate over all the
	// outgoing channels for a particular node.
	numNodeChans := 0
	err = firstNode.ForEachChannel(nil, func(_ *bolt.Tx, _ *ChannelEdgeInfo,
		outEdge, inEdge *ChannelEdgePolicy) error {

		// Each edge should indicate that it's outgoing (pointed
		// towards the second node).
		if !outEdge.Node.PubKey.IsEqual(secondNode.PubKey) {
			return fmt.Errorf("wrong outgoing edge")
		}

		// The incoming edge should also indicate that it's pointing to
		// the origin node.
		if !inEdge.Node.PubKey.IsEqual(firstNode.PubKey) {
			return fmt.Errorf("wrong incoming edge")
		}

		numNodeChans++
		return nil
	})
	if err != nil {
		t.Fatalf("for each failure: %v", err)
	}
	if numNodeChans != numChannels {
		t.Fatalf("all edges for node not reached within ForEach: "+
			"expected %v, got %v", numChannels, numNodeChans)
	}
}

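// assertPruneTip fails the test if the graph's prune tip doesn't match the
// expected block hash and height.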
func assertPruneTip(t *testing.T, graph *ChannelGraph, blockHash *chainhash.Hash,
	blockHeight uint32) {

	pruneHash, pruneHeight, err := graph.PruneTip()
	if err != nil {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: unable to fetch prune tip: %v", line, err)
	}
	if !bytes.Equal(blockHash[:], pruneHash[:]) {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line: %v, prune tips don't match, expected %x got %x",
			line, blockHash, pruneHash)
	}
	if pruneHeight != blockHeight {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: prune heights don't match, expected %v "+
			"got %v", line, blockHeight, pruneHeight)
	}
}

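// assertNumChans fails the test if the graph doesn't contain exactly n
// channels.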
func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
	numChans := 0
	if err := graph.ForEachChannel(func(*ChannelEdgeInfo, *ChannelEdgePolicy,
		*ChannelEdgePolicy) error {

		numChans++
		return nil
	}); err != nil {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: unable to scan channels: %v", line, err)
	}
	if numChans != n {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: expected %v chans instead have %v", line,
			n, numChans)
	}
}

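// assertChanViewEqual fails the test if the two channel views don't contain
// exactly the same set of outpoints.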
func assertChanViewEqual(t *testing.T, a []wire.OutPoint, b []*wire.OutPoint) {
	if len(a) != len(b) {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: chan views don't match", line)
	}

	chanViewSet := make(map[wire.OutPoint]struct{})
	for _, op := range a {
		chanViewSet[op] = struct{}{}
	}

	for _, op := range b {
		if _, ok := chanViewSet[*op]; !ok {
			_, _, line, _ := runtime.Caller(1)
			t.Fatalf("line %v: chanPoint(%v) not found in first view",
				line, op)
		}
	}
}

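// TestGraphPruning tests that channels are pruned from the graph as the
// outpoints that created them are spent, and that the prune tip and channel
// view are updated accordingly.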
func TestGraphPruning(t *testing.T) {
	t.Parallel()

	db, cleanUp, err := makeTestDB()
	defer cleanUp()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}

	graph := db.ChannelGraph()

	// As initial set up for the test, we'll create a graph with 5 vertexes
	// and enough edges to create a fully connected graph. The graph will
	// be rather simple, representing a straight line.
	const numNodes = 5
	graphNodes := make([]*LightningNode, numNodes)
	for i := 0; i < numNodes; i++ {
		node, err := createTestVertex(db)
		if err != nil {
			t.Fatalf("unable to create node: %v", err)
		}

		if err := graph.AddLightningNode(node); err != nil {
			t.Fatalf("unable to add node: %v", err)
		}

		graphNodes[i] = node
	}

	// With the vertexes created, we'll next create a series of channels
	// between them.
	channelPoints := make([]*wire.OutPoint, 0, numNodes-1)
	for i := 0; i < numNodes-1; i++ {
		txHash := sha256.Sum256([]byte{byte(i)})
		chanID := uint64(i + 1)
		op := wire.OutPoint{
			Hash:  txHash,
			Index: 0,
		}

		channelPoints = append(channelPoints, &op)

		edgeInfo := ChannelEdgeInfo{
			ChannelID:   chanID,
			ChainHash:   key,
			NodeKey1:    graphNodes[i].PubKey,
			NodeKey2:    graphNodes[i+1].PubKey,
			BitcoinKey1: graphNodes[i].PubKey,
			BitcoinKey2: graphNodes[i+1].PubKey,
			AuthProof: &ChannelAuthProof{
				NodeSig1:    testSig,
				NodeSig2:    testSig,
				BitcoinSig1: testSig,
				BitcoinSig2: testSig,
			},
			ChannelPoint: op,
			Capacity:     1000,
		}

		if err := graph.AddChannelEdge(&edgeInfo); err != nil {
			t.Fatalf("unable to add channel edge: %v", err)
		}

		// Create and add an edge with random data that points from
		// node_i -> node_i+1.
		edge := randEdgePolicy(chanID, op, db)
		edge.Flags = 0
		edge.Node = graphNodes[i]
		edge.Signature = testSig
		if err := graph.UpdateEdgePolicy(edge); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}

		// Create another random edge that points from node_i+1 ->
		// node_i this time.
		edge = randEdgePolicy(chanID, op, db)
		edge.Flags = 1
		edge.Node = graphNodes[i]
		edge.Signature = testSig
		if err := graph.UpdateEdgePolicy(edge); err != nil {
			t.Fatalf("unable to update edge: %v", err)
		}
	}

	// With all the channel points added, we'll consult the graph to ensure
	// it has the same channel view as the one we just constructed.
	channelView, err := graph.ChannelView()
	if err != nil {
		t.Fatalf("unable to get graph channel view: %v", err)
	}
	assertChanViewEqual(t, channelView, channelPoints)

	// Now with our test graph created, we can test the pruning
	// capabilities of the channel graph.

	// First we create a mock block that ends up closing the first two
	// channels.
	var blockHash chainhash.Hash
	copy(blockHash[:], bytes.Repeat([]byte{1}, 32))
	blockHeight := uint32(1)
	block := channelPoints[:2]
	prunedChans, err := graph.PruneGraph(block, &blockHash, blockHeight)
	if err != nil {
		t.Fatalf("unable to prune graph: %v", err)
	}
	if len(prunedChans) != 2 {
		t.Fatalf("incorrect number of channels pruned: expected %v, got %v",
			2, len(prunedChans))
	}

	// Now ensure that the prune tip has been updated.
	assertPruneTip(t, graph, &blockHash, blockHeight)

	// Count up the number of channels known within the graph, only 2
	// should be remaining.
	assertNumChans(t, graph, 2)

	// Those channels should also be missing from the channel view.
	channelView, err = graph.ChannelView()
	if err != nil {
		t.Fatalf("unable to get graph channel view: %v", err)
	}
	assertChanViewEqual(t, channelView, channelPoints[2:])

	// Next we'll create a block that doesn't close any channels within the
	// graph to test the negative error case.
	fakeHash := sha256.Sum256([]byte("test prune"))
	nonChannel := &wire.OutPoint{
		Hash:  fakeHash,
		Index: 9,
	}
	blockHash = sha256.Sum256(blockHash[:])
	blockHeight = 2
	prunedChans, err = graph.PruneGraph([]*wire.OutPoint{nonChannel},
		&blockHash, blockHeight)
	if err != nil {
		t.Fatalf("unable to prune graph: %v", err)
	}

	// No channels should've been detected as pruned.
	if len(prunedChans) != 0 {
		t.Fatalf("channels were pruned but shouldn't have been")
	}

	// Once again, the prune tip should've been updated.
	assertPruneTip(t, graph, &blockHash, blockHeight)
	assertNumChans(t, graph, 2)

	// Finally, create a block that prunes the remainder of the channels
	// from the graph.
	blockHash = sha256.Sum256(blockHash[:])
	blockHeight = 3
	prunedChans, err = graph.PruneGraph(channelPoints[2:], &blockHash,
		blockHeight)
	if err != nil {
		t.Fatalf("unable to prune graph: %v", err)
	}

	// The remainder of the channels should've been pruned from the graph.
	if len(prunedChans) != 2 {
		t.Fatalf("incorrect number of channels pruned: expected %v, got %v",
			2, len(prunedChans))
	}

	// The prune tip should be updated, and no channels should be found
	// within the current graph.
	assertPruneTip(t, graph, &blockHash, blockHeight)
	assertNumChans(t, graph, 0)

	// Finally, the channel view at this point in the graph should now be
	// completely empty.
	channelView, err = graph.ChannelView()
	if err != nil {
		t.Fatalf("unable to get graph channel view: %v", err)
	}
	if len(channelView) != 0 {
		t.Fatalf("channel view should be empty, instead have: %v",
			channelView)
	}
}

// compareNodes is used to compare two LightningNodes while excluding the
// Features struct, which cannot be compared as the semantics for reserializing
// the featuresMap have not been defined.
func compareNodes(a, b *LightningNode) error {
	if !reflect.DeepEqual(a.LastUpdate, b.LastUpdate) {
		return fmt.Errorf("LastUpdate doesn't match: expected %#v, \n"+
			"got %#v", a.LastUpdate, b.LastUpdate)
	}
	if !reflect.DeepEqual(a.Addresses, b.Addresses) {
		return fmt.Errorf("Addresses doesn't match: expected %#v, \n "+
			"got %#v", a.Addresses, b.Addresses)
	}
	if !reflect.DeepEqual(a.PubKey, b.PubKey) {
		return fmt.Errorf("PubKey doesn't match: expected %#v, \n "+
			"got %#v", a.PubKey, b.PubKey)
	}
	if !reflect.DeepEqual(a.Color, b.Color) {
		return fmt.Errorf("Color doesn't match: expected %#v, \n "+
			"got %#v", a.Color, b.Color)
	}
	if !reflect.DeepEqual(a.Alias, b.Alias) {
		return fmt.Errorf("Alias doesn't match: expected %#v, \n "+
			"got %#v", a.Alias, b.Alias)
	}
	if !reflect.DeepEqual(a.db, b.db) {
		return fmt.Errorf("db doesn't match: expected %#v, \n "+
			"got %#v", a.db, b.db)
	}
	if !reflect.DeepEqual(a.HaveNodeAnnouncement, b.HaveNodeAnnouncement) {
		return fmt.Errorf("HaveNodeAnnouncement doesn't match: expected %#v, \n "+
			"got %#v", a.HaveNodeAnnouncement, b.HaveNodeAnnouncement)
	}

	return nil
}

// compareEdgePolicies is used to compare two ChannelEdgePolicies using
// compareNodes, so as to exclude comparisons of the Nodes' Features struct.
func compareEdgePolicies(a, b *ChannelEdgePolicy) error {
	if a.ChannelID != b.ChannelID {
		return fmt.Errorf("ChannelID doesn't match: expected %v, "+
			"got %v", a.ChannelID, b.ChannelID)
	}
	if !reflect.DeepEqual(a.LastUpdate, b.LastUpdate) {
		return fmt.Errorf("LastUpdate doesn't match: expected %#v, \n "+
			"got %#v", a.LastUpdate, b.LastUpdate)
	}
	if a.Flags != b.Flags {
		return fmt.Errorf("Flags doesn't match: expected %v, "+
			"got %v", a.Flags, b.Flags)
	}
	if a.TimeLockDelta != b.TimeLockDelta {
		return fmt.Errorf("TimeLockDelta doesn't match: expected %v, "+
			"got %v", a.TimeLockDelta, b.TimeLockDelta)
	}
	if a.MinHTLC != b.MinHTLC {
		return fmt.Errorf("MinHTLC doesn't match: expected %v, "+
			"got %v", a.MinHTLC, b.MinHTLC)
	}
	if a.FeeBaseMSat != b.FeeBaseMSat {
		return fmt.Errorf("FeeBaseMSat doesn't match: expected %v, "+
			"got %v", a.FeeBaseMSat, b.FeeBaseMSat)
	}
	if a.FeeProportionalMillionths != b.FeeProportionalMillionths {
		return fmt.Errorf("FeeProportionalMillionths doesn't match: "+
			"expected %v, got %v", a.FeeProportionalMillionths,
			b.FeeProportionalMillionths)
	}
	if err := compareNodes(a.Node, b.Node); err != nil {
		return err
	}
	if !reflect.DeepEqual(a.db, b.db) {
		return fmt.Errorf("db doesn't match: expected %#v, \n "+
			"got %#v", a.db, b.db)
	}
	return nil
}