2018-03-26 20:09:00 +03:00
|
|
|
package htlcswitch
|
|
|
|
|
|
|
|
import (
|
|
|
|
"crypto/rand"
|
2018-06-01 02:05:02 +03:00
|
|
|
"io/ioutil"
|
2018-03-26 20:09:00 +03:00
|
|
|
"os"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2019-09-11 15:41:08 +03:00
|
|
|
sphinx "github.com/lightningnetwork/lightning-onion"
|
2018-03-26 20:09:00 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chainntnfs"
|
2020-11-09 12:21:30 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb/kvdb"
|
2020-08-27 22:34:55 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lntest/mock"
|
2018-03-26 20:09:00 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// cltv is the absolute expiry height stored alongside each test
	// entry; the decayed log's garbage collector is expected to purge
	// the entry once the chain tip passes this height.
	cltv uint32 = 100000
)
|
|
|
|
|
2018-06-01 02:05:02 +03:00
|
|
|
// tempDecayedLogPath creates a new temporary database path to back a single
// decayed log instance. It returns the directory and the database file name;
// the caller is responsible for removing the directory (see shutdown).
func tempDecayedLogPath(t *testing.T) (string, string) {
	// Create a throwaway directory to hold the bolt-backed replay log.
	dir, err := ioutil.TempDir("", "decayedlog")
	if err != nil {
		t.Fatalf("unable to create temporary decayed log dir: %v", err)
	}

	// The file name is fixed; only the directory varies per test.
	return dir, "sphinxreplay.db"
}
|
|
|
|
|
2018-03-26 20:09:00 +03:00
|
|
|
// startup sets up the DecayedLog and possibly the garbage collector.
|
2020-11-09 12:21:30 +03:00
|
|
|
func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog,
|
2020-08-27 22:34:55 +03:00
|
|
|
*mock.ChainNotifier, *sphinx.HashPrefix, error) {
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
var log sphinx.ReplayLog
|
2020-08-27 22:34:55 +03:00
|
|
|
var chainNotifier *mock.ChainNotifier
|
2018-03-26 20:09:00 +03:00
|
|
|
if notifier {
|
|
|
|
|
|
|
|
// Create the MockNotifier which triggers the garbage collector
|
2020-08-27 22:34:55 +03:00
|
|
|
chainNotifier = &mock.ChainNotifier{
|
|
|
|
SpendChan: make(chan *chainntnfs.SpendDetail),
|
|
|
|
EpochChan: make(chan *chainntnfs.BlockEpoch, 1),
|
|
|
|
ConfChan: make(chan *chainntnfs.TxConfirmation),
|
2018-03-26 20:09:00 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Initialize the DecayedLog object
|
2020-11-09 12:21:30 +03:00
|
|
|
log = NewDecayedLog(
|
|
|
|
dbPath, dbFileName, &kvdb.BoltConfig{}, chainNotifier,
|
|
|
|
)
|
2018-03-26 20:09:00 +03:00
|
|
|
} else {
|
|
|
|
// Initialize the DecayedLog object
|
2020-11-09 12:21:30 +03:00
|
|
|
log = NewDecayedLog(dbPath, dbFileName, &kvdb.BoltConfig{}, nil)
|
2018-03-26 20:09:00 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Open the channeldb (start the garbage collector)
|
|
|
|
err := log.Start()
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a HashPrefix identifier for a packet. Instead of actually
|
|
|
|
// generating an ECDH secret and hashing it, simulate with random bytes.
|
|
|
|
// This is used as a key to retrieve the cltv value.
|
|
|
|
var hashedSecret sphinx.HashPrefix
|
|
|
|
_, err = rand.Read(hashedSecret[:])
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return log, chainNotifier, &hashedSecret, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// shutdown deletes the temporary directory that the test database uses
|
|
|
|
// and handles closing the database.
|
|
|
|
func shutdown(dir string, d sphinx.ReplayLog) {
|
|
|
|
d.Stop()
|
|
|
|
os.RemoveAll(dir)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDecayedLogGarbageCollector tests the ability of the garbage collector
|
|
|
|
// to delete expired cltv values every time a block is received. Expired cltv
|
|
|
|
// values are cltv values that are < current block height.
|
|
|
|
func TestDecayedLogGarbageCollector(t *testing.T) {
|
2018-05-23 02:55:08 +03:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
dbPath, dbFileName := tempDecayedLogPath(t)
|
2018-06-01 02:05:02 +03:00
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d, notifier, hashedSecret, err := startup(dbPath, dbFileName, true)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to start up DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Store <hashedSecret, cltv> in the sharedHashBucket.
|
|
|
|
err = d.Put(hashedSecret, cltv)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to store in channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for database write (GC is in a goroutine)
|
|
|
|
time.Sleep(500 * time.Millisecond)
|
|
|
|
|
|
|
|
// Send block notifications to garbage collector. The garbage collector
|
|
|
|
// should remove the entry by block 100001.
|
|
|
|
|
|
|
|
// Send block 100000
|
2020-08-27 22:34:55 +03:00
|
|
|
notifier.EpochChan <- &chainntnfs.BlockEpoch{
|
2018-03-26 20:09:00 +03:00
|
|
|
Height: 100000,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assert that hashedSecret is still in the sharedHashBucket
|
|
|
|
val, err := d.Get(hashedSecret)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Get failed - received an error upon Get: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if val != cltv {
|
|
|
|
t.Fatalf("GC incorrectly deleted CLTV")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send block 100001 (expiry block)
|
2020-08-27 22:34:55 +03:00
|
|
|
notifier.EpochChan <- &chainntnfs.BlockEpoch{
|
2018-03-26 20:09:00 +03:00
|
|
|
Height: 100001,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for database write (GC is in a goroutine)
|
|
|
|
time.Sleep(500 * time.Millisecond)
|
|
|
|
|
|
|
|
// Assert that hashedSecret is not in the sharedHashBucket
|
2020-04-09 21:20:47 +03:00
|
|
|
_, err = d.Get(hashedSecret)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("CLTV was not deleted")
|
|
|
|
}
|
|
|
|
if err != sphinx.ErrLogEntryNotFound {
|
|
|
|
t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDecayedLogPersistentGarbageCollector tests the persistence property of
|
|
|
|
// the garbage collector. The garbage collector will be restarted immediately and
|
|
|
|
// a block that expires the stored CLTV value will be sent to the ChainNotifier.
|
|
|
|
// We test that this causes the <hashedSecret, CLTV> pair to be deleted even
|
|
|
|
// on GC restarts.
|
|
|
|
func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
|
2018-05-23 02:55:08 +03:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
dbPath, dbFileName := tempDecayedLogPath(t)
|
2018-06-01 02:05:02 +03:00
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d, _, hashedSecret, err := startup(dbPath, dbFileName, true)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to start up DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Store <hashedSecret, cltv> in the sharedHashBucket
|
|
|
|
if err = d.Put(hashedSecret, cltv); err != nil {
|
|
|
|
t.Fatalf("Unable to store in channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-06-28 00:37:23 +03:00
|
|
|
// The hash prefix should be retrievable from the decayed log.
|
|
|
|
_, err = d.Get(hashedSecret)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
|
|
|
|
}
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Shut down DecayedLog and the garbage collector along with it.
|
|
|
|
d.Stop()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d2, notifier2, _, err := startup(dbPath, dbFileName, true)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to restart DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d2)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
2018-06-28 00:37:23 +03:00
|
|
|
// Check that the hash prefix still exists in the new db instance.
|
|
|
|
_, err = d2.Get(hashedSecret)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-03-26 20:09:00 +03:00
|
|
|
// Send a block notification to the garbage collector that expires
|
|
|
|
// the stored CLTV.
|
2020-08-27 22:34:55 +03:00
|
|
|
notifier2.EpochChan <- &chainntnfs.BlockEpoch{
|
2018-03-26 20:09:00 +03:00
|
|
|
Height: int32(100001),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for database write (GC is in a goroutine)
|
|
|
|
time.Sleep(500 * time.Millisecond)
|
|
|
|
|
|
|
|
// Assert that hashedSecret is not in the sharedHashBucket
|
2018-06-28 00:37:23 +03:00
|
|
|
_, err = d2.Get(hashedSecret)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != sphinx.ErrLogEntryNotFound {
|
|
|
|
t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDecayedLogInsertionAndDeletion inserts a cltv value into the
// sharedHashBucket and then deletes it and finally asserts that we can no
// longer retrieve it.
func TestDecayedLogInsertionAndDeletion(t *testing.T) {
	t.Parallel()

	dbPath, dbFileName := tempDecayedLogPath(t)

	// No notifier: the garbage collector is not exercised by this test.
	d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
	if err != nil {
		t.Fatalf("Unable to start up DecayedLog: %v", err)
	}
	defer shutdown(dbPath, d)

	// Store <hashedSecret, cltv> in the sharedHashBucket.
	err = d.Put(hashedSecret, cltv)
	if err != nil {
		t.Fatalf("Unable to store in channeldb: %v", err)
	}

	// Delete hashedSecret from the sharedHashBucket.
	err = d.Delete(hashedSecret)
	if err != nil {
		t.Fatalf("Unable to delete from channeldb: %v", err)
	}

	// Assert that hashedSecret is not in the sharedHashBucket: the Get
	// must fail, and with the specific not-found error.
	_, err = d.Get(hashedSecret)
	if err == nil {
		t.Fatalf("CLTV was not deleted")
	}
	if err != sphinx.ErrLogEntryNotFound {
		t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
	}
}
|
|
|
|
|
|
|
|
// TestDecayedLogStartAndStop tests for persistence. The DecayedLog is started,
|
|
|
|
// a cltv value is stored in the sharedHashBucket, and then it the DecayedLog
|
|
|
|
// is stopped. The DecayedLog is then started up again and we test that the
|
|
|
|
// cltv value is indeed still stored in the sharedHashBucket. We then delete
|
|
|
|
// the cltv value and check that it persists upon startup.
|
|
|
|
func TestDecayedLogStartAndStop(t *testing.T) {
|
2018-05-23 02:55:08 +03:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
dbPath, dbFileName := tempDecayedLogPath(t)
|
2018-06-01 02:05:02 +03:00
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to start up DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Store <hashedSecret, cltv> in the sharedHashBucket.
|
|
|
|
err = d.Put(hashedSecret, cltv)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to store in channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shutdown the DecayedLog's channeldb
|
|
|
|
d.Stop()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d2, _, hashedSecret2, err := startup(dbPath, dbFileName, false)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to restart DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d2)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Retrieve the stored cltv value given the hashedSecret key.
|
|
|
|
value, err := d2.Get(hashedSecret)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to retrieve from channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that the original cltv value matches the retrieved cltv
|
|
|
|
// value.
|
|
|
|
if cltv != value {
|
|
|
|
t.Fatalf("Value retrieved doesn't match value stored")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete hashedSecret from sharedHashBucket
|
|
|
|
err = d2.Delete(hashedSecret2)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to delete from channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shutdown the DecayedLog's channeldb
|
|
|
|
d2.Stop()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d3, _, hashedSecret3, err := startup(dbPath, dbFileName, false)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to restart DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d3)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Assert that hashedSecret is not in the sharedHashBucket
|
|
|
|
_, err = d3.Get(hashedSecret3)
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("CLTV was not deleted")
|
|
|
|
}
|
|
|
|
if err != sphinx.ErrLogEntryNotFound {
|
|
|
|
t.Fatalf("Get failed - received unexpected error upon Get: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestDecayedLogStorageAndRetrieval stores a cltv value and then retrieves it
|
|
|
|
// via the nested sharedHashBucket and finally asserts that the original stored
|
|
|
|
// and retrieved cltv values are equal.
|
|
|
|
func TestDecayedLogStorageAndRetrieval(t *testing.T) {
|
2018-05-23 02:55:08 +03:00
|
|
|
t.Parallel()
|
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
dbPath, dbFileName := tempDecayedLogPath(t)
|
2018-06-01 02:05:02 +03:00
|
|
|
|
2020-11-09 12:21:30 +03:00
|
|
|
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
|
2018-03-26 20:09:00 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to start up DecayedLog: %v", err)
|
|
|
|
}
|
2018-06-01 02:05:02 +03:00
|
|
|
defer shutdown(dbPath, d)
|
2018-03-26 20:09:00 +03:00
|
|
|
|
|
|
|
// Store <hashedSecret, cltv> in the sharedHashBucket
|
|
|
|
err = d.Put(hashedSecret, cltv)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to store in channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Retrieve the stored cltv value given the hashedSecret key.
|
|
|
|
value, err := d.Get(hashedSecret)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Unable to retrieve from channeldb: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the original cltv value does not match the value retrieved,
|
|
|
|
// then the test failed.
|
|
|
|
if cltv != value {
|
|
|
|
t.Fatalf("Value retrieved doesn't match value stored")
|
|
|
|
}
|
|
|
|
}
|