Merge pull request #4899 from halseth/ignore-local-ann-from-remote
[gossiper] Ignore received ChannelAnnouncements for own channels
This commit is contained in: commit b52e872878
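The core of the change: when ProcessRemoteAnnouncement receives a ChannelAnnouncement whose NodeID1 or NodeID2 equals our own node key, the gossiper now rejects it instead of inserting the edge into the graph. A condensed, self-contained sketch of that check (the helper name and toy byte slices are ours; the real code, shown in the first hunk below, compares against d.selfKey.SerializeCompressed()):

    package main

    import (
        "bytes"
        "fmt"
    )

    // isOwnChannelAnn reports whether a remote ChannelAnnouncement names our
    // own node as one of the channel endpoints. nodeID1 and nodeID2 stand in
    // for the 33-byte compressed keys carried in lnwire.ChannelAnnouncement.
    func isOwnChannelAnn(nodeID1, nodeID2, ownKey []byte) bool {
        return bytes.Equal(nodeID1, ownKey) || bytes.Equal(nodeID2, ownKey)
    }

    func main() {
        own := []byte("our-compressed-pubkey")
        peer := []byte("some-other-pubkey")
        fmt.Println(isOwnChannelAnn(own, peer, own))  // true: announcement is ignored
        fmt.Println(isOwnChannelAnn(peer, peer, own)) // false: announcement is processed
    }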
@@ -542,6 +542,22 @@ func (d *AuthenticatedGossiper) ProcessRemoteAnnouncement(msg lnwire.Message,
         errChan <- nil
         return errChan
+
+    // To avoid inserting edges in the graph for our own channels that we
+    // have already closed, we ignore such channel announcements coming
+    // from the remote.
+    case *lnwire.ChannelAnnouncement:
+        ownKey := d.selfKey.SerializeCompressed()
+        ownErr := fmt.Errorf("ignoring remote ChannelAnnouncement " +
+            "for own channel")
+
+        if bytes.Equal(m.NodeID1[:], ownKey) ||
+            bytes.Equal(m.NodeID2[:], ownKey) {
+
+            log.Warn(ownErr)
+            errChan <- ownErr
+            return errChan
+        }
    }

    nMsg := &networkMsg{
@@ -574,7 +590,7 @@ func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message,
 // entire channel announcement and update messages will be re-constructed and
 // broadcast to the rest of the network.
 func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message,
-    source *btcec.PublicKey, optionalFields ...OptionalMsgField) chan error {
+    optionalFields ...OptionalMsgField) chan error {

     optionalMsgFields := &optionalMsgFields{}
     optionalMsgFields.apply(optionalFields...)
@@ -583,7 +599,7 @@ func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message,
         msg:               msg,
         optionalMsgFields: optionalMsgFields,
         isRemote:          false,
-        source:            source,
+        source:            d.selfKey,
         err:               make(chan error, 1),
     }
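This hunk and the previous one are the API side of the change: ProcessLocalAnnouncement no longer takes a source public key, because a local announcement is by definition sourced from our own node, so the gossiper stamps it with d.selfKey itself. A toy model of the before/after call shape (the types here are stand-ins, not lnd's):

    package main

    import "fmt"

    type pubKey string
    type message string

    type gossiper struct{ selfKey pubKey }

    // Before: ProcessLocalAnnouncement(msg, source, ...) — every caller had to
    // pass its own identity key. After: the source parameter is gone and the
    // gossiper fills it in from selfKey.
    func (g *gossiper) ProcessLocalAnnouncement(msg message) chan error {
        errChan := make(chan error, 1)
        fmt.Printf("processing %q with source %q\n", msg, g.selfKey)
        errChan <- nil
        return errChan
    }

    func main() {
        g := &gossiper{selfKey: "02aa..."}
        if err := <-g.ProcessLocalAnnouncement("node_announcement"); err != nil {
            fmt.Println("rejected:", err)
        }
    }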
@@ -48,17 +48,19 @@ var (
     _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
     _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)

+    selfKeyPriv, _ = btcec.NewPrivateKey(btcec.S256())
+    selfKeyPub     = selfKeyPriv.PubKey()
+
     bitcoinKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256())
     bitcoinKeyPub1     = bitcoinKeyPriv1.PubKey()

-    nodeKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256())
-    nodeKeyPub1     = nodeKeyPriv1.PubKey()
+    remoteKeyPriv1, _ = btcec.NewPrivateKey(btcec.S256())
+    remoteKeyPub1     = remoteKeyPriv1.PubKey()

     bitcoinKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256())
     bitcoinKeyPub2     = bitcoinKeyPriv2.PubKey()

-    nodeKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256())
-    nodeKeyPub2     = nodeKeyPriv2.PubKey()
+    remoteKeyPriv2, _ = btcec.NewPrivateKey(btcec.S256())

     trickleDelay    = time.Millisecond * 100
     retransmitDelay = time.Hour * 1
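These test fixtures now pre-generate one key pair for "ourselves" (selfKeyPriv/selfKeyPub) and several for remote peers, replacing the old neutral nodeKey* names. For reference, minting such a key and serializing its compressed public form — the 33-byte value a ChannelAnnouncement carries in NodeID1/NodeID2 — looks like this (standalone sketch using the same btcec v1 API the tests use):

    package main

    import (
        "encoding/hex"
        "fmt"

        "github.com/btcsuite/btcd/btcec"
    )

    func main() {
        // Same call the test fixtures use to mint throwaway identities.
        priv, err := btcec.NewPrivateKey(btcec.S256())
        if err != nil {
            panic(err)
        }
        pub := priv.PubKey().SerializeCompressed()
        fmt.Printf("compressed pubkey (%d bytes): %s\n", len(pub), hex.EncodeToString(pub))
    }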
@@ -454,8 +456,7 @@ type annBatch struct
     nodeAnn1 *lnwire.NodeAnnouncement
     nodeAnn2 *lnwire.NodeAnnouncement

-    localChanAnn  *lnwire.ChannelAnnouncement
-    remoteChanAnn *lnwire.ChannelAnnouncement
+    chanAnn *lnwire.ChannelAnnouncement

     chanUpdAnn1 *lnwire.ChannelUpdate
     chanUpdAnn2 *lnwire.ChannelUpdate
@@ -464,22 +465,30 @@ type annBatch struct
     remoteProofAnn *lnwire.AnnounceSignatures
 }

-func createAnnouncements(blockHeight uint32) (*annBatch, error) {
+func createLocalAnnouncements(blockHeight uint32) (*annBatch, error) {
+    return createAnnouncements(blockHeight, selfKeyPriv, remoteKeyPriv1)
+}
+
+func createRemoteAnnouncements(blockHeight uint32) (*annBatch, error) {
+    return createAnnouncements(blockHeight, remoteKeyPriv1, remoteKeyPriv2)
+}
+
+func createAnnouncements(blockHeight uint32, key1, key2 *btcec.PrivateKey) (*annBatch, error) {
     var err error
     var batch annBatch
     timestamp := testTimestamp

-    batch.nodeAnn1, err = createNodeAnnouncement(nodeKeyPriv1, timestamp)
+    batch.nodeAnn1, err = createNodeAnnouncement(key1, timestamp)
     if err != nil {
         return nil, err
     }

-    batch.nodeAnn2, err = createNodeAnnouncement(nodeKeyPriv2, timestamp)
+    batch.nodeAnn2, err = createNodeAnnouncement(key2, timestamp)
     if err != nil {
         return nil, err
     }

-    batch.remoteChanAnn, err = createRemoteChannelAnnouncement(blockHeight)
+    batch.chanAnn, err = createChannelAnnouncement(blockHeight, key1, key2)
     if err != nil {
         return nil, err
     }
@@ -488,32 +497,27 @@ func createAnnouncements(blockHeight uint32) (*annBatch, error) {
         ShortChannelID: lnwire.ShortChannelID{
             BlockHeight: blockHeight,
         },
-        NodeSignature:    batch.remoteChanAnn.NodeSig2,
-        BitcoinSignature: batch.remoteChanAnn.BitcoinSig2,
-    }
-
-    batch.localChanAnn, err = createRemoteChannelAnnouncement(blockHeight)
-    if err != nil {
-        return nil, err
+        NodeSignature:    batch.chanAnn.NodeSig2,
+        BitcoinSignature: batch.chanAnn.BitcoinSig2,
     }

     batch.localProofAnn = &lnwire.AnnounceSignatures{
         ShortChannelID: lnwire.ShortChannelID{
             BlockHeight: blockHeight,
         },
-        NodeSignature:    batch.localChanAnn.NodeSig1,
-        BitcoinSignature: batch.localChanAnn.BitcoinSig1,
+        NodeSignature:    batch.chanAnn.NodeSig1,
+        BitcoinSignature: batch.chanAnn.BitcoinSig1,
     }

     batch.chanUpdAnn1, err = createUpdateAnnouncement(
-        blockHeight, 0, nodeKeyPriv1, timestamp,
+        blockHeight, 0, key1, timestamp,
     )
     if err != nil {
         return nil, err
     }

     batch.chanUpdAnn2, err = createUpdateAnnouncement(
-        blockHeight, 1, nodeKeyPriv2, timestamp,
+        blockHeight, 1, key2, timestamp,
     )
     if err != nil {
         return nil, err
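The refactor above keeps a single parameterized constructor and adds two intent-revealing wrappers, so each test states whose channel it is building. The shape of that pattern, reduced to hypothetical stand-in types:

    package main

    import "fmt"

    type key string

    type batch struct{ node1, node2 key }

    // One parameterized core...
    func createAnnouncements(blockHeight uint32, key1, key2 key) (*batch, error) {
        return &batch{node1: key1, node2: key2}, nil
    }

    // ...and thin wrappers naming the two intents: a channel we are part of,
    // and a channel between two foreign nodes.
    func createLocalAnnouncements(blockHeight uint32) (*batch, error) {
        return createAnnouncements(blockHeight, "selfKey", "remoteKey1")
    }

    func createRemoteAnnouncements(blockHeight uint32) (*batch, error) {
        return createAnnouncements(blockHeight, "remoteKey1", "remoteKey2")
    }

    func main() {
        local, _ := createLocalAnnouncements(0)
        remote, _ := createRemoteAnnouncements(0)
        fmt.Println(*local, *remote)
    }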
@@ -611,6 +615,7 @@ func signUpdate(nodeKey *btcec.PrivateKey, a *lnwire.ChannelUpdate) error {
 }

 func createAnnouncementWithoutProof(blockHeight uint32,
+    key1, key2 *btcec.PublicKey,
     extraBytes ...[]byte) *lnwire.ChannelAnnouncement {

     a := &lnwire.ChannelAnnouncement{
@@ -621,8 +626,8 @@ func createAnnouncementWithoutProof(blockHeight uint32,
         },
         Features: testFeatures,
     }
-    copy(a.NodeID1[:], nodeKeyPub1.SerializeCompressed())
-    copy(a.NodeID2[:], nodeKeyPub2.SerializeCompressed())
+    copy(a.NodeID1[:], key1.SerializeCompressed())
+    copy(a.NodeID2[:], key2.SerializeCompressed())
     copy(a.BitcoinKey1[:], bitcoinKeyPub1.SerializeCompressed())
     copy(a.BitcoinKey2[:], bitcoinKeyPub2.SerializeCompressed())
     if len(extraBytes) == 1 {
@@ -635,10 +640,16 @@ func createAnnouncementWithoutProof(blockHeight uint32,
 func createRemoteChannelAnnouncement(blockHeight uint32,
     extraBytes ...[]byte) (*lnwire.ChannelAnnouncement, error) {

-    a := createAnnouncementWithoutProof(blockHeight, extraBytes...)
+    return createChannelAnnouncement(blockHeight, remoteKeyPriv1, remoteKeyPriv2, extraBytes...)
+}

-    pub := nodeKeyPriv1.PubKey()
-    signer := mock.SingleSigner{Privkey: nodeKeyPriv1}
+func createChannelAnnouncement(blockHeight uint32, key1, key2 *btcec.PrivateKey,
+    extraBytes ...[]byte) (*lnwire.ChannelAnnouncement, error) {
+
+    a := createAnnouncementWithoutProof(blockHeight, key1.PubKey(), key2.PubKey(), extraBytes...)
+
+    pub := key1.PubKey()
+    signer := mock.SingleSigner{Privkey: key1}
     sig, err := netann.SignAnnouncement(&signer, pub, a)
     if err != nil {
         return nil, err
@@ -648,8 +659,8 @@ func createRemoteChannelAnnouncement(blockHeight uint32,
         return nil, err
     }

-    pub = nodeKeyPriv2.PubKey()
-    signer = mock.SingleSigner{Privkey: nodeKeyPriv2}
+    pub = key2.PubKey()
+    signer = mock.SingleSigner{Privkey: key2}
     sig, err = netann.SignAnnouncement(&signer, pub, a)
     if err != nil {
         return nil, err
@@ -750,12 +761,12 @@ func createTestCtx(startHeight uint32) (*testCtx, func(), error) {
         RotateTicker:          ticker.NewForce(DefaultSyncerRotationInterval),
         HistoricalSyncTicker:  ticker.NewForce(DefaultHistoricalSyncInterval),
         NumActiveSyncers:      3,
-        AnnSigner:             &mock.SingleSigner{Privkey: nodeKeyPriv1},
+        AnnSigner:             &mock.SingleSigner{Privkey: selfKeyPriv},
         SubBatchDelay:         time.Second * 5,
         MinimumBatchSize:      10,
         MaxChannelUpdateBurst: DefaultMaxChannelUpdateBurst,
         ChannelUpdateInterval: DefaultChannelUpdateInterval,
-    }, nodeKeyPub1)
+    }, selfKeyPub)

     if err := gossiper.Start(); err != nil {
         cleanUpDb()
@@ -798,7 +809,7 @@ func TestProcessAnnouncement(t *testing.T) {
         }
     }

-    nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

     // First, we'll craft a valid remote channel announcement and send it to
     // the gossiper so that it can be processed.
@@ -831,7 +842,7 @@ func TestProcessAnnouncement(t *testing.T) {

     // We'll then craft the channel policy of the remote party and also send
     // it to the gossiper.
-    ua, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
+    ua, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create update announcement: %v", err)
     }
@@ -858,7 +869,7 @@ func TestProcessAnnouncement(t *testing.T) {
     }

     // Finally, we'll craft the remote party's node announcement.
-    na, err := createNodeAnnouncement(nodeKeyPriv1, timestamp)
+    na, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
     }
@@ -900,12 +911,12 @@ func TestPrematureAnnouncement(t *testing.T) {
     }
     defer cleanup()

-    _, err = createNodeAnnouncement(nodeKeyPriv1, timestamp)
+    _, err = createNodeAnnouncement(remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
     }

-    nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

     // Pretending that we receive the valid channel announcement from
     // remote side, but block height of this announcement is greater than
@@ -929,7 +940,7 @@ func TestPrematureAnnouncement(t *testing.T) {
     // Pretending that we receive the valid channel update announcement from
     // remote side, but block height of this announcement is greater than
     // highest known to us, so it should be rejected.
-    ua, err := createUpdateAnnouncement(1, 0, nodeKeyPriv1, timestamp)
+    ua, err := createUpdateAnnouncement(1, 0, remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create update announcement: %v", err)
     }
@@ -970,15 +981,11 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
         }
     }

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -988,9 +995,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
     // Recreate lightning network topology. Initialize router with channel
     // between two nodes.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localChanAnn, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1004,9 +1009,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
     }

     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.chanUpdAnn1, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1020,9 +1023,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
     }

     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.nodeAnn1, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1080,9 +1081,7 @@ func TestSignatureAnnouncementLocalFirst(t *testing.T) {
     // Pretending that we receive local channel announcement from funding
     // manager, thereby kick off the announcement exchange process.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localProofAnn, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process remote announcement")
     }
@@ -1175,15 +1174,11 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
         }
     }

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -1224,8 +1219,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
     // Recreate lightning network topology. Initialize router with channel
     // between two nodes.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localChanAnn,
-        localKey):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1241,8 +1235,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
     }

     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1,
-        localKey):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1257,9 +1250,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
     }

     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.nodeAnn1, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1316,8 +1307,7 @@ func TestOrphanSignatureAnnouncement(t *testing.T) {
     // After that we process local announcement, and waiting to receive
     // the channel announcement.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn,
-        localKey):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process remote announcement")
     }
@@ -1374,15 +1364,11 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -1405,9 +1391,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
     // Recreate lightning network topology. Initialize router with channel
     // between two nodes.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localChanAnn, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
     }
@@ -1424,7 +1408,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
     // manager, thereby kick off the announcement exchange process.
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localProofAnn, localKey,
+        batch.localProofAnn,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process remote announcement")
@@ -1594,15 +1578,11 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -1625,7 +1605,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
     // between two nodes.
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localChanAnn, localKey,
+        batch.chanAnn,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
@@ -1641,7 +1621,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {

     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.chanUpdAnn1, localKey,
+        batch.chanUpdAnn1,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
@@ -1664,7 +1644,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {

     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.nodeAnn1, localKey,
+        batch.nodeAnn1,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
@@ -1714,7 +1694,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
    // manager, thereby kick off the announcement exchange process.
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localProofAnn, localKey,
+        batch.localProofAnn,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
@@ -1850,7 +1830,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
     // Next, we'll ensure that channel update announcements are properly
     // stored and de-duplicated. We do this by creating two updates
     // announcements with the same short ID and flag.
-    ua, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
+    ua, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create update announcement: %v", err)
     }
@@ -1865,7 +1845,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Adding the very same announcement shouldn't cause an increase in the
     // number of ChannelUpdate announcements stored.
-    ua2, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
+    ua2, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create update announcement: %v", err)
     }
@@ -1880,7 +1860,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Adding an announcement with a later timestamp should replace the
     // stored one.
-    ua3, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp+1)
+    ua3, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp+1)
     if err != nil {
         t.Fatalf("can't create update announcement: %v", err)
     }
@@ -1914,7 +1894,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Adding a channel update with an earlier timestamp should NOT
     // replace the one stored.
-    ua4, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
+    ua4, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create update announcement: %v", err)
     }
@@ -1930,7 +1910,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Next we'll ensure that node announcements are properly de-duplicated.
     // We'll first add a single instance with a node's private key.
-    na, err := createNodeAnnouncement(nodeKeyPriv1, timestamp)
+    na, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
     }
@@ -1944,7 +1924,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
     }

     // We'll now add another node to the batch.
-    na2, err := createNodeAnnouncement(nodeKeyPriv2, timestamp)
+    na2, err := createNodeAnnouncement(remoteKeyPriv2, timestamp)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
     }
@@ -1959,7 +1939,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Adding a new instance of the _same_ node shouldn't increase the size
     // of the node ann batch.
-    na3, err := createNodeAnnouncement(nodeKeyPriv2, timestamp)
+    na3, err := createNodeAnnouncement(remoteKeyPriv2, timestamp)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
     }
@@ -1974,7 +1954,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Ensure that node announcement with different pointer to same public
     // key is still de-duplicated.
-    newNodeKeyPointer := nodeKeyPriv2
+    newNodeKeyPointer := remoteKeyPriv2
     na4, err := createNodeAnnouncement(newNodeKeyPointer, timestamp)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
@@ -1990,7 +1970,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {

     // Ensure that node announcement with increased timestamp replaces
     // what is currently stored.
-    na5, err := createNodeAnnouncement(nodeKeyPriv2, timestamp+1)
+    na5, err := createNodeAnnouncement(remoteKeyPriv2, timestamp+1)
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
     }
@@ -2002,7 +1982,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
     if len(announcements.nodeAnnouncements) != 2 {
         t.Fatal("node announcement not replaced in batch")
     }
-    nodeID := route.NewVertex(nodeKeyPriv2.PubKey())
+    nodeID := route.NewVertex(remoteKeyPriv2.PubKey())
     stored, ok := announcements.nodeAnnouncements[nodeID]
     if !ok {
         t.Fatalf("node announcement not found in batch")
@@ -2076,11 +2056,11 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {
     // We'll start off by processing a channel announcement without a proof
     // (i.e., an unadvertised channel), followed by a node announcement for
     // this same channel announcement.
-    chanAnn := createAnnouncementWithoutProof(startingHeight - 2)
-    pubKey := nodeKeyPriv1.PubKey()
+    chanAnn := createAnnouncementWithoutProof(startingHeight-2, selfKeyPub, remoteKeyPub1)
+    pubKey := remoteKeyPriv1.PubKey()

     select {
-    case err := <-ctx.gossiper.ProcessLocalAnnouncement(chanAnn, pubKey):
+    case err := <-ctx.gossiper.ProcessLocalAnnouncement(chanAnn):
         if err != nil {
             t.Fatalf("unable to process local announcement: %v", err)
         }
@@ -2096,13 +2076,13 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {
     case <-time.After(2 * trickleDelay):
     }

-    nodeAnn, err := createNodeAnnouncement(nodeKeyPriv1, timestamp)
+    nodeAnn, err := createNodeAnnouncement(remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("unable to create node announcement: %v", err)
     }

     select {
-    case err := <-ctx.gossiper.ProcessLocalAnnouncement(nodeAnn, pubKey):
+    case err := <-ctx.gossiper.ProcessLocalAnnouncement(nodeAnn):
         if err != nil {
             t.Fatalf("unable to process remote announcement: %v", err)
         }
@@ -2145,7 +2125,7 @@ func TestForwardPrivateNodeAnnouncement(t *testing.T) {

     // We'll recreate the NodeAnnouncement with an updated timestamp to
     // prevent a stale update. The NodeAnnouncement should now be forwarded.
-    nodeAnn, err = createNodeAnnouncement(nodeKeyPriv1, timestamp+1)
+    nodeAnn, err = createNodeAnnouncement(remoteKeyPriv1, timestamp+1)
     if err != nil {
         t.Fatalf("unable to create node announcement: %v", err)
     }
@@ -2179,11 +2159,11 @@ func TestRejectZombieEdge(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createRemoteAnnouncements(0)
     if err != nil {
         t.Fatalf("unable to create announcements: %v", err)
     }
-    remotePeer := &mockPeer{pk: nodeKeyPriv2.PubKey()}
+    remotePeer := &mockPeer{pk: remoteKeyPriv2.PubKey()}

     // processAnnouncements is a helper closure we'll use to test that we
     // properly process/reject announcements based on whether they're for a
@@ -2192,7 +2172,7 @@ func TestRejectZombieEdge(t *testing.T) {
         t.Helper()

         errChan := ctx.gossiper.ProcessRemoteAnnouncement(
-            batch.remoteChanAnn, remotePeer,
+            batch.chanAnn, remotePeer,
         )
         select {
         case err := <-errChan:
@@ -2253,9 +2233,9 @@ func TestRejectZombieEdge(t *testing.T) {
     // We'll mark the edge for which we'll process announcements for as a
     // zombie within the router. This should reject any announcements for
     // this edge while it remains as a zombie.
-    chanID := batch.remoteChanAnn.ShortChannelID
+    chanID := batch.chanAnn.ShortChannelID
     err = ctx.router.MarkEdgeZombie(
-        chanID, batch.remoteChanAnn.NodeID1, batch.remoteChanAnn.NodeID2,
+        chanID, batch.chanAnn.NodeID1, batch.chanAnn.NodeID2,
     )
     if err != nil {
         t.Fatalf("unable to mark channel %v as zombie: %v", chanID, err)
@@ -2285,15 +2265,12 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createRemoteAnnouncements(0)
     if err != nil {
         t.Fatalf("unable to create announcements: %v", err)
     }

-    localPrivKey := nodeKeyPriv1
-    remotePrivKey := nodeKeyPriv2
-
-    remotePeer := &mockPeer{pk: remotePrivKey.PubKey()}
+    remotePeer := &mockPeer{pk: remoteKeyPriv1.PubKey()}

     // processAnnouncement is a helper closure we'll use to ensure an
     // announcement is properly processed/rejected based on whether the edge
@@ -2342,14 +2319,14 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
     // past to consider it a zombie.
     zombieTimestamp := time.Now().Add(-routing.DefaultChannelPruneExpiry)
     batch.chanUpdAnn2.Timestamp = uint32(zombieTimestamp.Unix())
-    if err := signUpdate(remotePrivKey, batch.chanUpdAnn2); err != nil {
+    if err := signUpdate(remoteKeyPriv2, batch.chanUpdAnn2); err != nil {
         t.Fatalf("unable to sign update with new timestamp: %v", err)
     }

     // We'll also add the edge to our zombie index.
-    chanID := batch.remoteChanAnn.ShortChannelID
+    chanID := batch.chanAnn.ShortChannelID
     err = ctx.router.MarkEdgeZombie(
-        chanID, batch.remoteChanAnn.NodeID1, batch.remoteChanAnn.NodeID2,
+        chanID, batch.chanAnn.NodeID1, batch.chanAnn.NodeID2,
     )
     if err != nil {
         t.Fatalf("unable mark channel %v as zombie: %v", chanID, err)
@@ -2365,9 +2342,9 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
     // allow the channel update to be processed even though it is still
     // marked as a zombie within the index, since it is a fresh new update.
     // This won't work however since we'll sign it with the wrong private
-    // key (local rather than remote).
+    // key (remote key 1 rather than remote key 2).
     batch.chanUpdAnn2.Timestamp = uint32(time.Now().Unix())
-    if err := signUpdate(localPrivKey, batch.chanUpdAnn2); err != nil {
+    if err := signUpdate(remoteKeyPriv1, batch.chanUpdAnn2); err != nil {
         t.Fatalf("unable to sign update with new timestamp: %v", err)
     }
@@ -2376,7 +2353,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {

     // Signing it with the correct private key should allow it to be
     // processed.
-    if err := signUpdate(remotePrivKey, batch.chanUpdAnn2); err != nil {
+    if err := signUpdate(remoteKeyPriv2, batch.chanUpdAnn2); err != nil {
         t.Fatalf("unable to sign update with new timestamp: %v", err)
     }
@@ -2397,7 +2374,7 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {

     // We'll go ahead and process the channel announcement to ensure the
     // channel update is processed thereafter.
-    processAnnouncement(batch.remoteChanAnn, false, false)
+    processAnnouncement(batch.chanAnn, false, false)

     // After successfully processing the announcement, the channel update
     // should have been processed and broadcast successfully as well.
@@ -2431,15 +2408,11 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -2500,7 +2473,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {

     // Recreate lightning network topology. Initialize router with channel
     // between two nodes.
-    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localChanAnn, localKey)
+    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn)
     if err != nil {
         t.Fatalf("unable to process :%v", err)
     }
@@ -2510,7 +2483,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
     case <-time.After(2 * trickleDelay):
     }

-    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1, localKey)
+    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1)
     if err != nil {
         t.Fatalf("unable to process :%v", err)
     }
@@ -2520,7 +2493,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
     case <-time.After(2 * trickleDelay):
     }

-    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1, localKey)
+    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1)
     if err != nil {
         t.Fatalf("unable to process :%v", err)
     }
@@ -2570,9 +2543,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {

     // Pretending that we receive local channel announcement from funding
     // manager, thereby kick off the announcement exchange process.
-    err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localProofAnn, localKey,
-    )
+    err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn)
     if err != nil {
         t.Fatalf("unable to process :%v", err)
     }
@@ -2645,7 +2616,7 @@ func TestExtraDataChannelAnnouncementValidation(t *testing.T) {
     }
     defer cleanup()

-    remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

     // We'll now create an announcement that contains an extra set of bytes
     // that we don't know of ourselves, but should still include in the
@@ -2681,7 +2652,7 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) {
     }
     defer cleanup()

-    remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

     // In this scenario, we'll create two announcements, one regular
     // channel announcement, and another channel update announcement, that
@@ -2691,14 +2662,14 @@ func TestExtraDataChannelUpdateValidation(t *testing.T) {
         t.Fatalf("unable to create chan ann: %v", err)
     }
     chanUpdAnn1, err := createUpdateAnnouncement(
-        0, 0, nodeKeyPriv1, timestamp,
+        0, 0, remoteKeyPriv1, timestamp,
         []byte("must also validate"),
     )
     if err != nil {
         t.Fatalf("unable to create chan up: %v", err)
     }
     chanUpdAnn2, err := createUpdateAnnouncement(
-        0, 1, nodeKeyPriv2, timestamp,
+        0, 1, remoteKeyPriv2, timestamp,
         []byte("must also validate"),
     )
     if err != nil {
@@ -2747,14 +2718,14 @@ func TestExtraDataNodeAnnouncementValidation(t *testing.T) {
     }
     defer cleanup()

-    remotePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    remotePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
     timestamp := testTimestamp

     // We'll create a node announcement that includes a set of opaque data
     // which we don't know of, but will store anyway in order to ensure
     // upgrades can flow smoothly in the future.
     nodeAnn, err := createNodeAnnouncement(
-        nodeKeyPriv1, timestamp, []byte("gotta validate"),
+        remoteKeyPriv1, timestamp, []byte("gotta validate"),
     )
     if err != nil {
         t.Fatalf("can't create node announcement: %v", err)
@@ -2822,15 +2793,11 @@ func TestRetransmit(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -2841,23 +2808,17 @@ func TestRetransmit(t *testing.T) {
     // announcement. No messages should be broadcasted yet, since no proof
     // has been exchanged.
     assertProcessAnnouncement(
-        t, ctx.gossiper.ProcessLocalAnnouncement(
-            batch.localChanAnn, localKey,
-        ),
+        t, ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn),
     )
     assertBroadcast(t, ctx, 0)

     assertProcessAnnouncement(
-        t, ctx.gossiper.ProcessLocalAnnouncement(
-            batch.chanUpdAnn1, localKey,
-        ),
+        t, ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1),
     )
     assertBroadcast(t, ctx, 0)

     assertProcessAnnouncement(
-        t, ctx.gossiper.ProcessLocalAnnouncement(
-            batch.nodeAnn1, localKey,
-        ),
+        t, ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1),
     )
     assertBroadcast(t, ctx, 0)
@@ -2873,9 +2834,7 @@ func TestRetransmit(t *testing.T) {
     // Now add the local and remote proof to the gossiper, which should
     // trigger a broadcast of the announcements.
     assertProcessAnnouncement(
-        t, ctx.gossiper.ProcessLocalAnnouncement(
-            batch.localProofAnn, localKey,
-        ),
+        t, ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn),
     )
     assertBroadcast(t, ctx, 0)
@@ -2946,7 +2905,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createRemoteAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }
@@ -2980,7 +2939,7 @@ func TestNodeAnnouncementNoChannels(t *testing.T) {
     // Now add the node's channel to the graph by processing the channel
     // announcement and channel update.
     select {
-    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteChanAnn,
+    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanAnn,
         remotePeer):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process remote announcement")
@@ -3051,7 +3010,7 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {

     chanUpdateHeight := uint32(0)
     timestamp := uint32(123456)
-    nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}

     // In this scenario, we'll test whether the message flags field in a channel
     // update is properly handled.
@@ -3071,14 +3030,14 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {

     // The first update should fail from an invalid max HTLC field, which is
     // less than the min HTLC.
-    chanUpdAnn, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp)
+    chanUpdAnn, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, timestamp)
     if err != nil {
         t.Fatalf("unable to create channel update: %v", err)
     }

     chanUpdAnn.HtlcMinimumMsat = 5000
     chanUpdAnn.HtlcMaximumMsat = 4000
-    if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil {
+    if err := signUpdate(remoteKeyPriv1, chanUpdAnn); err != nil {
         t.Fatalf("unable to sign channel update: %v", err)
     }
@@ -3095,7 +3054,7 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
     // the max HTLC field is 0.
     chanUpdAnn.HtlcMinimumMsat = 0
     chanUpdAnn.HtlcMaximumMsat = 0
-    if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil {
+    if err := signUpdate(remoteKeyPriv1, chanUpdAnn); err != nil {
         t.Fatalf("unable to sign channel update: %v", err)
     }
@@ -3111,7 +3070,7 @@ func TestOptionalFieldsChannelUpdateValidation(t *testing.T) {
     // The final update should succeed, since setting the flag 0 means the
     // nonsense max_htlc field will just be ignored.
     chanUpdAnn.MessageFlags = 0
-    if err := signUpdate(nodeKeyPriv1, chanUpdAnn); err != nil {
+    if err := signUpdate(remoteKeyPriv1, chanUpdAnn); err != nil {
         t.Fatalf("unable to sign channel update: %v", err)
     }
@@ -3138,17 +3097,14 @@ func TestSendChannelUpdateReliably(t *testing.T) {
     }
     defer cleanup()

-    batch, err := createAnnouncements(0)
+    batch, err := createLocalAnnouncements(0)
     if err != nil {
         t.Fatalf("can't generate announcements: %v", err)
     }

     // We'll also create two keys, one for ourselves and another for the
     // remote party.
-    localKey, err := btcec.ParsePubKey(batch.nodeAnn1.NodeID[:], btcec.S256())
-    if err != nil {
-        t.Fatalf("unable to parse pubkey: %v", err)
-    }
-
     remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
     if err != nil {
         t.Fatalf("unable to parse pubkey: %v", err)
@@ -3195,9 +3151,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
     // Process the channel announcement for which we'll send a channel
     // update for.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localChanAnn, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local channel announcement")
     }
@@ -3214,9 +3168,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {

     // Now, we'll process the channel update.
     select {
-    case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.chanUpdAnn1, localKey,
-    ):
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local channel update")
     }
@@ -3270,14 +3222,14 @@ func TestSendChannelUpdateReliably(t *testing.T) {

     // Now that the remote peer is offline, we'll send a new channel update.
     batch.chanUpdAnn1.Timestamp++
-    if err := signUpdate(nodeKeyPriv1, batch.chanUpdAnn1); err != nil {
+    if err := signUpdate(selfKeyPriv, batch.chanUpdAnn1); err != nil {
         t.Fatalf("unable to sign new channel update: %v", err)
     }

     // With the new update created, we'll go ahead and process it.
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.chanUpdAnn1, localKey,
+        batch.chanUpdAnn1,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local channel update")
@@ -3318,7 +3270,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
     // the channel.
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        batch.localProofAnn, localKey,
+        batch.localProofAnn,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local channel proof")
@@ -3366,7 +3318,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
     newChannelUpdate := &lnwire.ChannelUpdate{}
     *newChannelUpdate = *staleChannelUpdate
     newChannelUpdate.Timestamp++
-    if err := signUpdate(nodeKeyPriv1, newChannelUpdate); err != nil {
+    if err := signUpdate(selfKeyPriv, newChannelUpdate); err != nil {
         t.Fatalf("unable to sign new channel update: %v", err)
     }
@@ -3375,7 +3327,7 @@ func TestSendChannelUpdateReliably(t *testing.T) {
     // not announced.
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        newChannelUpdate, localKey,
+        newChannelUpdate,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local channel update")
@@ -3448,14 +3400,14 @@ func TestSendChannelUpdateReliably(t *testing.T) {
 }

 func sendLocalMsg(t *testing.T, ctx *testCtx, msg lnwire.Message,
-    localPub *btcec.PublicKey, optionalMsgFields ...OptionalMsgField) {
+    optionalMsgFields ...OptionalMsgField) {

     t.Helper()

     var err error
     select {
     case err = <-ctx.gossiper.ProcessLocalAnnouncement(
-        msg, localPub, optionalMsgFields...,
+        msg, optionalMsgFields...,
     ):
     case <-time.After(2 * time.Second):
         t.Fatal("did not process local announcement")
@@ -3521,7 +3473,7 @@ func TestPropagateChanPolicyUpdate(t *testing.T) {
     const numChannels = 3
     channelsToAnnounce := make([]*annBatch, 0, numChannels)
     for i := 0; i < numChannels; i++ {
-        newChan, err := createAnnouncements(uint32(i + 1))
+        newChan, err := createLocalAnnouncements(uint32(i + 1))
         if err != nil {
             t.Fatalf("unable to make new channel ann: %v", err)
         }
@@ -3529,8 +3481,7 @@ func TestPropagateChanPolicyUpdate(t *testing.T) {
         channelsToAnnounce = append(channelsToAnnounce, newChan)
     }

-    localKey := nodeKeyPriv1.PubKey()
-    remoteKey := nodeKeyPriv2.PubKey()
+    remoteKey := remoteKeyPriv1.PubKey()

     sentMsgs := make(chan lnwire.Message, 10)
     remotePeer := &mockPeer{remoteKey, sentMsgs, ctx.gossiper.quit}
@@ -3557,7 +3508,7 @@ func TestPropagateChanPolicyUpdate(t *testing.T) {
     // the gossiper in order for it to process. However, we'll hold back
     // the channel ann proof from the first channel in order to have it be
     // marked as private channel.
-    firstChanID := channelsToAnnounce[0].localChanAnn.ShortChannelID
+    firstChanID := channelsToAnnounce[0].chanAnn.ShortChannelID
     for i, batch := range channelsToAnnounce {
         // channelPoint ensures that each channel policy in the map
         // returned by PropagateChanPolicyUpdate has a unique key. Since
@@ -3565,20 +3516,20 @@ func TestPropagateChanPolicyUpdate(t *testing.T) {
         // each channel has a unique channel point.
         channelPoint := ChannelPoint(wire.OutPoint{Index: uint32(i)})

-        sendLocalMsg(t, ctx, batch.localChanAnn, localKey, channelPoint)
-        sendLocalMsg(t, ctx, batch.chanUpdAnn1, localKey)
-        sendLocalMsg(t, ctx, batch.nodeAnn1, localKey)
+        sendLocalMsg(t, ctx, batch.chanAnn, channelPoint)
+        sendLocalMsg(t, ctx, batch.chanUpdAnn1)
+        sendLocalMsg(t, ctx, batch.nodeAnn1)

         sendRemoteMsg(t, ctx, batch.chanUpdAnn2, remotePeer)
         sendRemoteMsg(t, ctx, batch.nodeAnn2, remotePeer)

         // We'll skip sending the auth proofs from the first channel to
         // ensure that it's seen as a private channel.
-        if batch.localChanAnn.ShortChannelID == firstChanID {
+        if batch.chanAnn.ShortChannelID == firstChanID {
             continue
         }

-        sendLocalMsg(t, ctx, batch.localProofAnn, localKey)
+        sendLocalMsg(t, ctx, batch.localProofAnn)
         sendRemoteMsg(t, ctx, batch.remoteProofAnn, remotePeer)
     }
@@ -3700,9 +3651,8 @@ func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) {
     }
     defer cleanup()

-    chanAnn1 := createAnnouncementWithoutProof(100)
-    chanAnn2 := createAnnouncementWithoutProof(101)
-    localKey := nodeKeyPriv1.PubKey()
+    chanAnn1 := createAnnouncementWithoutProof(100, selfKeyPub, remoteKeyPub1)
+    chanAnn2 := createAnnouncementWithoutProof(101, selfKeyPub, remoteKeyPub1)

     // assertOptionalMsgFields is a helper closure that ensures the optional
     // message fields were set as intended.
@@ -3727,7 +3677,7 @@ func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) {

     // We'll process the first announcement without any optional fields. We
     // should see the channel's capacity and outpoint have a zero value.
-    sendLocalMsg(t, ctx, chanAnn1, localKey)
+    sendLocalMsg(t, ctx, chanAnn1)
     assertOptionalMsgFields(chanAnn1.ShortChannelID, 0, wire.OutPoint{})

     // Providing the capacity and channel point as optional fields should
@@ -3735,7 +3685,7 @@ func TestProcessChannelAnnouncementOptionalMsgFields(t *testing.T) {
     capacity := btcutil.Amount(1000)
     channelPoint := wire.OutPoint{Index: 1}
     sendLocalMsg(
-        t, ctx, chanAnn2, localKey, ChannelCapacity(capacity),
+        t, ctx, chanAnn2, ChannelCapacity(capacity),
         ChannelPoint(channelPoint),
     )
     assertOptionalMsgFields(chanAnn2.ShortChannelID, capacity, channelPoint)
@@ -3873,16 +3823,14 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {

     t.Helper()

-    nodePeer := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    nodePeer := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
     var errChan chan error
     if isRemote {
         errChan = ctx.gossiper.ProcessRemoteAnnouncement(
             msg, nodePeer,
         )
     } else {
-        errChan = ctx.gossiper.ProcessLocalAnnouncement(
-            msg, nodePeer.pk,
-        )
+        errChan = ctx.gossiper.ProcessLocalAnnouncement(msg)
     }

     select {
@@ -3917,7 +3865,7 @@ func TestBroadcastAnnsAfterGraphSynced(t *testing.T) {

     // A local channel announcement should be broadcast though, regardless
     // of whether we've synced our graph or not.
-    chanUpd, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, 1)
+    chanUpd, err := createUpdateAnnouncement(0, 0, remoteKeyPriv1, 1)
     if err != nil {
         t.Fatalf("unable to create channel announcement: %v", err)
     }
@@ -3957,13 +3905,13 @@ func TestRateLimitChannelUpdates(t *testing.T) {
     // We'll create a batch of signed announcements, including updates for
     // both sides, for a channel and process them. They should all be
     // forwarded as this is our first time learning about the channel.
-    batch, err := createAnnouncements(blockHeight)
+    batch, err := createRemoteAnnouncements(blockHeight)
     require.NoError(t, err)

-    nodePeer1 := &mockPeer{nodeKeyPriv1.PubKey(), nil, nil}
+    nodePeer1 := &mockPeer{remoteKeyPriv1.PubKey(), nil, nil}
     select {
     case err := <-ctx.gossiper.ProcessRemoteAnnouncement(
-        batch.remoteChanAnn, nodePeer1,
+        batch.chanAnn, nodePeer1,
     ):
         require.NoError(t, err)
     case <-time.After(time.Second):
@@ -3979,7 +3927,7 @@ func TestRateLimitChannelUpdates(t *testing.T) {
         t.Fatal("remote announcement not processed")
     }

-    nodePeer2 := &mockPeer{nodeKeyPriv2.PubKey(), nil, nil}
+    nodePeer2 := &mockPeer{remoteKeyPriv2.PubKey(), nil, nil}
     select {
     case err := <-ctx.gossiper.ProcessRemoteAnnouncement(
         batch.chanUpdAnn2, nodePeer2,
@@ -3998,7 +3946,7 @@ func TestRateLimitChannelUpdates(t *testing.T) {
         }
     }

-    shortChanID := batch.remoteChanAnn.ShortChannelID.ToUint64()
+    shortChanID := batch.chanAnn.ShortChannelID.ToUint64()
     require.Contains(t, ctx.router.infos, shortChanID)
     require.Contains(t, ctx.router.edges, shortChanID)
@@ -4034,7 +3982,7 @@ func TestRateLimitChannelUpdates(t *testing.T) {
     // our rebroadcast interval.
     rateLimitKeepAliveUpdate := *batch.chanUpdAnn1
     rateLimitKeepAliveUpdate.Timestamp++
-    require.NoError(t, signUpdate(nodeKeyPriv1, &rateLimitKeepAliveUpdate))
+    require.NoError(t, signUpdate(remoteKeyPriv1, &rateLimitKeepAliveUpdate))
     assertRateLimit(&rateLimitKeepAliveUpdate, nodePeer1, true)

     keepAliveUpdate := *batch.chanUpdAnn1
@@ -4042,7 +3990,7 @@ func TestRateLimitChannelUpdates(t *testing.T) {
         time.Unix(int64(batch.chanUpdAnn1.Timestamp), 0).
             Add(ctx.gossiper.cfg.RebroadcastInterval).Unix(),
     )
-    require.NoError(t, signUpdate(nodeKeyPriv1, &keepAliveUpdate))
+    require.NoError(t, signUpdate(remoteKeyPriv1, &keepAliveUpdate))
     assertRateLimit(&keepAliveUpdate, nodePeer1, false)

     // Then, we'll move on to the non keep alive cases.
@@ -4054,7 +4002,7 @@ func TestRateLimitChannelUpdates(t *testing.T) {
     for i := uint32(0); i < uint32(ctx.gossiper.cfg.MaxChannelUpdateBurst); i++ {
         updateSameDirection.Timestamp++
         updateSameDirection.BaseFee++
-        require.NoError(t, signUpdate(nodeKeyPriv1, &updateSameDirection))
+        require.NoError(t, signUpdate(remoteKeyPriv1, &updateSameDirection))
         assertRateLimit(&updateSameDirection, nodePeer1, false)
     }
@@ -4062,14 +4010,14 @@ func TestRateLimitChannelUpdates(t *testing.T) {
     // has been reached and we haven't ticked at the next interval yet.
     updateSameDirection.Timestamp++
     updateSameDirection.BaseFee++
-    require.NoError(t, signUpdate(nodeKeyPriv1, &updateSameDirection))
+    require.NoError(t, signUpdate(remoteKeyPriv1, &updateSameDirection))
     assertRateLimit(&updateSameDirection, nodePeer1, true)

     // An update for the other direction should not be rate limited.
     updateDiffDirection := *batch.chanUpdAnn2
     updateDiffDirection.Timestamp++
     updateDiffDirection.BaseFee++
-    require.NoError(t, signUpdate(nodeKeyPriv2, &updateDiffDirection))
+    require.NoError(t, signUpdate(remoteKeyPriv2, &updateDiffDirection))
     assertRateLimit(&updateDiffDirection, nodePeer2, false)

     // Wait for the next interval to tick. Since we've only waited for one,
@@ -4078,9 +4026,173 @@ func TestRateLimitChannelUpdates(t *testing.T) {
     for i := 0; i < ctx.gossiper.cfg.MaxChannelUpdateBurst; i++ {
         updateSameDirection.Timestamp++
         updateSameDirection.BaseFee++
-        require.NoError(t, signUpdate(nodeKeyPriv1, &updateSameDirection))
+        require.NoError(t, signUpdate(remoteKeyPriv1, &updateSameDirection))

         shouldRateLimit := i != 0
         assertRateLimit(&updateSameDirection, nodePeer1, shouldRateLimit)
     }
 }
+
+// TestIgnoreOwnAnnouncement tests that the gossiper will ignore announcements
+// about our own channels when coming from a remote peer.
+func TestIgnoreOwnAnnouncement(t *testing.T) {
+    t.Parallel()
+
+    ctx, cleanup, err := createTestCtx(proofMatureDelta)
+    if err != nil {
+        t.Fatalf("can't create context: %v", err)
+    }
+    defer cleanup()
+
+    batch, err := createLocalAnnouncements(0)
+    if err != nil {
+        t.Fatalf("can't generate announcements: %v", err)
+    }
+
+    remoteKey, err := btcec.ParsePubKey(batch.nodeAnn2.NodeID[:], btcec.S256())
+    if err != nil {
+        t.Fatalf("unable to parse pubkey: %v", err)
+    }
+    remotePeer := &mockPeer{remoteKey, nil, nil}
+
+    // Try to let the remote peer tell us about the channel we are part of.
+    select {
+    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+        batch.chanAnn, remotePeer,
+    ):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process remote announcement")
+    }
+    // It should be ignored, since the gossiper only cares about local
+    // announcements for its own channels.
+    if err == nil || !strings.Contains(err.Error(), "ignoring") {
+        t.Fatalf("expected gossiper to ignore announcement, got: %v", err)
+    }
+
+    // Now do the local channel announcement, node announcement, and channel
+    // update. No messages should be broadcasted yet, since we don't have
+    // the announcement signatures.
+    select {
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanAnn):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process local announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process channel ann: %v", err)
+    }
+    select {
+    case <-ctx.broadcastedMessage:
+        t.Fatal("channel announcement was broadcast")
+    case <-time.After(2 * trickleDelay):
+    }
+
+    select {
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.chanUpdAnn1):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process local announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process channel update: %v", err)
+    }
+    select {
+    case <-ctx.broadcastedMessage:
+        t.Fatal("channel update announcement was broadcast")
+    case <-time.After(2 * trickleDelay):
+    }
+
+    select {
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.nodeAnn1):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process local announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process node ann: %v", err)
+    }
+    select {
+    case <-ctx.broadcastedMessage:
+        t.Fatal("node announcement was broadcast")
+    case <-time.After(2 * trickleDelay):
+    }
+
+    // We should accept the remote's channel update and node announcement.
+    select {
+    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+        batch.chanUpdAnn2, remotePeer,
+    ):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process remote announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process channel update: %v", err)
+    }
+    select {
+    case <-ctx.broadcastedMessage:
+        t.Fatal("channel update announcement was broadcast")
+    case <-time.After(2 * trickleDelay):
+    }
+
+    select {
+    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+        batch.nodeAnn2, remotePeer,
+    ):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process remote announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process node ann: %v", err)
+    }
+    select {
+    case <-ctx.broadcastedMessage:
+        t.Fatal("node announcement was broadcast")
+    case <-time.After(2 * trickleDelay):
+    }
+
+    // Now we exchange the proofs, the messages will be broadcasted to the
+    // network.
+    select {
+    case err = <-ctx.gossiper.ProcessLocalAnnouncement(batch.localProofAnn):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process remote announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process local proof: %v", err)
+    }
+
+    select {
+    case <-ctx.broadcastedMessage:
+        t.Fatal("announcements were broadcast")
+    case <-time.After(2 * trickleDelay):
+    }
+
+    select {
+    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+        batch.remoteProofAnn, remotePeer,
+    ):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process remote announcement")
+    }
+    if err != nil {
+        t.Fatalf("unable to process remote proof: %v", err)
+    }
+
+    for i := 0; i < 5; i++ {
+        select {
+        case <-ctx.broadcastedMessage:
+        case <-time.After(time.Second):
+            t.Fatal("announcement wasn't broadcast")
+        }
+    }
+
+    // Finally, we again check that we'll ignore the remote giving us
+    // announcements about our own channel.
+    select {
+    case err = <-ctx.gossiper.ProcessRemoteAnnouncement(
+        batch.chanAnn, remotePeer,
+    ):
+    case <-time.After(2 * time.Second):
+        t.Fatal("did not process remote announcement")
+    }
+    if err == nil || !strings.Contains(err.Error(), "ignoring") {
+        t.Fatalf("expected gossiper to ignore announcement, got: %v", err)
+    }
+}
server.go (17 lines changed)
@@ -670,7 +670,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
     // With the announcement generated, we'll sign it to properly
     // authenticate the message on the network.
     authSig, err := netann.SignAnnouncement(
-        s.nodeSigner, s.identityECDH.PubKey(), nodeAnn,
+        s.nodeSigner, nodeKeyECDH.PubKey(), nodeAnn,
     )
     if err != nil {
         return nil, fmt.Errorf("unable to generate signature for "+
@@ -824,7 +824,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
         IgnoreHistoricalFilters: cfg.IgnoreHistoricalGossipFilters,
         PinnedSyncers:           cfg.Gossip.PinnedSyncers,
     },
-    s.identityECDH.PubKey(),
+    nodeKeyECDH.PubKey(),
     )

     s.localChanMgr = &localchans.Manager{
@@ -1026,13 +1026,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
     CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, error) {
         return s.genNodeAnnouncement(true)
     },
-    SendAnnouncement: func(msg lnwire.Message,
-        optionalFields ...discovery.OptionalMsgField) chan error {
-
-        return s.authGossiper.ProcessLocalAnnouncement(
-            msg, nodeKeyECDH.PubKey(), optionalFields...,
-        )
-    },
+    SendAnnouncement: s.authGossiper.ProcessLocalAnnouncement,
     NotifyWhenOnline: s.NotifyWhenOnline,
     TempChanIDSeed:   chanIDSeed,
     FindChannel: func(chanID lnwire.ChannelID) (
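With the source parameter gone, ProcessLocalAnnouncement's signature matches the SendAnnouncement callback field exactly, so the wrapper closure above collapses into a Go method value. A minimal illustration of that language feature (stand-in types, not lnd's):

    package main

    import "fmt"

    type announcer struct{ name string }

    func (a *announcer) Process(msg string) chan error {
        errChan := make(chan error, 1)
        fmt.Printf("%s processing %q\n", a.name, msg)
        errChan <- nil
        return errChan
    }

    type fundingConfig struct {
        // The field has a plain function type...
        SendAnnouncement func(msg string) chan error
    }

    func main() {
        a := &announcer{name: "gossiper"}
        // ...so the bound method can be assigned directly; the receiver is
        // captured in the method value, no wrapper closure needed.
        cfg := fundingConfig{SendAnnouncement: a.Process}
        <-cfg.SendAnnouncement("channel_update")
    }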
@@ -1415,7 +1409,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
     RetryDuration:  time.Second * 5,
     TargetOutbound: 100,
     Dial: noiseDial(
-        s.identityECDH, s.cfg.net, s.cfg.ConnectionTimeout,
+        nodeKeyECDH, s.cfg.net, s.cfg.ConnectionTimeout,
     ),
     OnConnection: s.OutboundPeerConnected,
     })
@@ -3792,8 +3786,7 @@ func (s *server) fetchLastChanUpdate() func(lnwire.ShortChannelID) (
 // applyChannelUpdate applies the channel update to the different sub-systems of
 // the server.
 func (s *server) applyChannelUpdate(update *lnwire.ChannelUpdate) error {
-    pubKey := s.identityECDH.PubKey()
-    errChan := s.authGossiper.ProcessLocalAnnouncement(update, pubKey)
+    errChan := s.authGossiper.ProcessLocalAnnouncement(update)
     select {
     case err := <-errChan:
         return err