package channeldb

import (
	"bytes"
	"crypto/sha256"
	"encoding/binary"
	"fmt"

	"github.com/btcsuite/btcd/btcec"
	"github.com/coreos/bbolt"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing/route"
)
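
// Each function below implements a single schema migration. Every migration
// receives the read-write transaction it runs in, so returning an error
// aborts and rolls back the entire migration; the wiring of schema version
// numbers to these functions lives elsewhere in the package.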

// migrateNodeAndEdgeUpdateIndex is a migration function that will update the
// database from version 0 to version 1. In version 1, we add two new indexes
// (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement
// the new graph sync protocol.
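//
// The index entries are keyed as (updateTime || nodePub) for nodes and
// (updateTime || chanID) for edges and store no value; since the 8-byte
// update time leads the key, iterating either index in key order visits
// entries in chronological order.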
func migrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
	// First, we'll populate the node portion of the new index. Before we
	// can add new values to the index, we'll first create the new bucket
	// where these items will be housed.
	nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
	if err != nil {
		return fmt.Errorf("unable to create node bucket: %v", err)
	}
	nodeUpdateIndex, err := nodes.CreateBucketIfNotExists(
		nodeUpdateIndexBucket,
	)
	if err != nil {
		return fmt.Errorf("unable to create node update index: %v", err)
	}

	log.Infof("Populating new node update index bucket")

	// Now that we know the bucket has been created, we'll iterate over the
	// entire node bucket so we can add the (updateTime || nodePub) key
	// into the node update index.
	err = nodes.ForEach(func(nodePub, nodeInfo []byte) error {
		if len(nodePub) != 33 {
			return nil
		}

		log.Tracef("Adding %x to node update index", nodePub)

		// The first 8 bytes of a node's serialized data is the update
		// time, so we can extract that without decoding the entire
		// structure.
		updateTime := nodeInfo[:8]

		// Now that we have the update time, we can construct the key
		// to insert into the index.
		var indexKey [8 + 33]byte
		copy(indexKey[:8], updateTime)
		copy(indexKey[8:], nodePub)

		return nodeUpdateIndex.Put(indexKey[:], nil)
	})
	if err != nil {
		return fmt.Errorf("unable to update node indexes: %v", err)
	}

	log.Infof("Populating new edge update index bucket")

	// With the set of nodes updated, we'll now update all edges to have a
	// corresponding entry in the edge update index.
	edges, err := tx.CreateBucketIfNotExists(edgeBucket)
	if err != nil {
		return fmt.Errorf("unable to create edge bucket: %v", err)
	}
	edgeUpdateIndex, err := edges.CreateBucketIfNotExists(
		edgeUpdateIndexBucket,
	)
	if err != nil {
		return fmt.Errorf("unable to create edge update index: %v", err)
	}

	// We'll now run through each edge policy in the database, and update
	// the index to ensure each edge has the proper record.
	err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) error {
		if len(edgeKey) != 41 {
			return nil
		}

		// Now that we know this is the proper record, we'll grab the
		// channel ID (last 8 bytes of the key), and then decode the
		// edge policy so we can access the update time.
		chanID := edgeKey[33:]
		edgePolicyReader := bytes.NewReader(edgePolicyBytes)

		edgePolicy, err := deserializeChanEdgePolicy(
			edgePolicyReader, nodes,
		)
		if err != nil {
			return err
		}

		log.Tracef("Adding chan_id=%v to edge update index",
			edgePolicy.ChannelID)

		// We'll now construct the index key using the channel ID, and
		// the last time it was updated: (updateTime || chanID).
		var indexKey [8 + 8]byte
		byteOrder.PutUint64(
			indexKey[:], uint64(edgePolicy.LastUpdate.Unix()),
		)
		copy(indexKey[8:], chanID)

		return edgeUpdateIndex.Put(indexKey[:], nil)
	})
	if err != nil {
		return fmt.Errorf("unable to update edge indexes: %v", err)
	}

	log.Infof("Migration to node and edge update indexes complete!")

	return nil
}

// migrateInvoiceTimeSeries is a database migration that assigns all existing
// invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid.
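//
// The padding is 24 zero bytes per invoice: the add index, the settle index,
// and the amount paid are each encoded as 8 bytes under the new
// serialization format.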
func migrateInvoiceTimeSeries(tx *bbolt.Tx) error {
	invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
	if err != nil {
		return err
	}

	addIndex, err := invoices.CreateBucketIfNotExists(
		addIndexBucket,
	)
	if err != nil {
		return err
	}
	settleIndex, err := invoices.CreateBucketIfNotExists(
		settleIndexBucket,
	)
	if err != nil {
		return err
	}

	log.Infof("Migrating invoice database to new time series format")

	// Now that we have all the buckets we need, we'll run through each
	// invoice in the database, and update it to reflect the new format
	// expected post migration.
	err = invoices.ForEach(func(invoiceNum, invoiceBytes []byte) error {
		// If this is a sub bucket, then we'll skip it.
		if invoiceBytes == nil {
			return nil
		}

		// First, we'll make a copy of the encoded invoice bytes.
		invoiceBytesCopy := make([]byte, len(invoiceBytes))
		copy(invoiceBytesCopy, invoiceBytes)

		// With the bytes copied over, we'll append 24 additional
		// bytes. We do this so we can decode the invoice under the new
		// serialization format.
		padding := bytes.Repeat([]byte{0}, 24)
		invoiceBytesCopy = append(invoiceBytesCopy, padding...)

		invoiceReader := bytes.NewReader(invoiceBytesCopy)
		invoice, err := deserializeInvoice(invoiceReader)
		if err != nil {
			return fmt.Errorf("unable to decode invoice: %v", err)
		}

		// Now that we have the fully decoded invoice, we can update
		// the various indexes that we've added, and finally the
		// invoice itself before re-inserting it.

		// First, we'll get the new sequence in the addIndex in order
		// to create the proper mapping.
		nextAddSeqNo, err := addIndex.NextSequence()
		if err != nil {
			return err
		}
		var seqNoBytes [8]byte
		byteOrder.PutUint64(seqNoBytes[:], nextAddSeqNo)
		err = addIndex.Put(seqNoBytes[:], invoiceNum[:])
		if err != nil {
			return err
		}

		log.Tracef("Adding invoice (preimage=%x, add_index=%v) to add "+
			"time series", invoice.Terms.PaymentPreimage[:],
			nextAddSeqNo)

		// Next, we'll check if the invoice has been settled or not. If
		// so, then we'll also add it to the settle index.
		var nextSettleSeqNo uint64
		if invoice.Terms.State == ContractSettled {
			nextSettleSeqNo, err = settleIndex.NextSequence()
			if err != nil {
				return err
			}

			var seqNoBytes [8]byte
			byteOrder.PutUint64(seqNoBytes[:], nextSettleSeqNo)
			err := settleIndex.Put(seqNoBytes[:], invoiceNum)
			if err != nil {
				return err
			}

			invoice.AmtPaid = invoice.Terms.Value

			log.Tracef("Adding invoice (preimage=%x, "+
				"settle_index=%v) to settle time series",
				invoice.Terms.PaymentPreimage[:],
				nextSettleSeqNo)
		}

		// Finally, we'll update the invoice itself with the new
		// indexing information as well as the amount paid if it has
		// been settled or not.
		invoice.AddIndex = nextAddSeqNo
		invoice.SettleIndex = nextSettleSeqNo

		// We've fully migrated an invoice, so we'll now update the
		// invoice in-place.
		var b bytes.Buffer
		if err := serializeInvoice(&b, &invoice); err != nil {
			return err
		}

		return invoices.Put(invoiceNum, b.Bytes())
	})
	if err != nil {
		return err
	}

	log.Infof("Migration to invoice time series index complete!")

	return nil
}

// migrateInvoiceTimeSeriesOutgoingPayments is a follow up to the
// migrateInvoiceTimeSeries migration. As of the time of writing, the
// OutgoingPayment struct embeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format.
func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
	payBucket := tx.Bucket(paymentBucket)
	if payBucket == nil {
		return nil
	}

	log.Infof("Migrating invoice database to new outgoing payment format")

	err := payBucket.ForEach(func(payID, paymentBytes []byte) error {
		log.Tracef("Migrating payment %x", payID[:])

		// The internal invoices for each payment only contain a
		// populated contract term and creation date; as a result,
		// most of the bytes will be "empty".

		// We'll calculate the end of the invoice index assuming a
		// "minimal" invoice that's embedded within the greater
		// OutgoingPayment. The breakdown is:
		// 3 bytes empty var bytes, 16 bytes creation date, 16 bytes
		// settled date, 32 bytes payment pre-image, 8 bytes value, 1
		// byte settled.
		endOfInvoiceIndex := 1 + 1 + 1 + 16 + 16 + 32 + 8 + 1
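
		// In total that is 3 + 16 + 16 + 32 + 8 + 1 = 76 bytes; the
		// three leading 1s are the single length bytes of the empty
		// var-byte fields.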

		// We'll now extract the prefix of the pure invoice embedded
		// within.
		invoiceBytes := paymentBytes[:endOfInvoiceIndex]

		// With the prefix extracted, we'll copy over the invoice, and
		// also add padding for the new 24 bytes of fields, and finally
		// append the remainder of the outgoing payment.
		paymentCopy := make([]byte, len(invoiceBytes))
		copy(paymentCopy[:], invoiceBytes)

		padding := bytes.Repeat([]byte{0}, 24)
		paymentCopy = append(paymentCopy, padding...)
		paymentCopy = append(
			paymentCopy, paymentBytes[endOfInvoiceIndex:]...,
		)

		// At this point, we now have the new format of the outgoing
		// payments, so we'll attempt to deserialize it to ensure the
		// bytes are properly formatted.
		paymentReader := bytes.NewReader(paymentCopy)
		_, err := deserializeOutgoingPayment(paymentReader)
		if err != nil {
			return fmt.Errorf("unable to deserialize payment: %v", err)
		}

		// Now that we know the modification was successful, we'll
		// write it back to disk in the new format.
		if err := payBucket.Put(payID, paymentCopy); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	log.Infof("Migration to outgoing payment invoices complete!")

	return nil
}

// migrateEdgePolicies is a migration function that will update the edges
// bucket. It ensures that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known.
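//
// Unknown policies are written under the same (nodePub || chanID) key as
// known ones, but with a sentinel value (see putChanEdgePolicyUnknown and the
// unknownPolicy marker used elsewhere in this package), which keeps "policy
// not yet received" distinguishable from "channel not found".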
func migrateEdgePolicies(tx *bbolt.Tx) error {
	nodes := tx.Bucket(nodeBucket)
	if nodes == nil {
		return nil
	}

	edges := tx.Bucket(edgeBucket)
	if edges == nil {
		return nil
	}

	edgeIndex := edges.Bucket(edgeIndexBucket)
	if edgeIndex == nil {
		return nil
	}

	// checkKey gets the policy from the database with a low-level call
	// so that it is still possible to distinguish between unknown and
	// not present.
	checkKey := func(channelId uint64, keyBytes []byte) error {
		var channelID [8]byte
		byteOrder.PutUint64(channelID[:], channelId)

		_, err := fetchChanEdgePolicy(edges,
			channelID[:], keyBytes, nodes)

		if err == ErrEdgeNotFound {
			log.Tracef("Adding unknown edge policy present for node %x, channel %v",
				keyBytes, channelId)

			err := putChanEdgePolicyUnknown(edges, channelId, keyBytes)
			if err != nil {
				return err
			}

			return nil
		}

		return err
	}

	// Iterate over all channels and check both edge policies.
	err := edgeIndex.ForEach(func(chanID, edgeInfoBytes []byte) error {
		infoReader := bytes.NewReader(edgeInfoBytes)
		edgeInfo, err := deserializeChanEdgeInfo(infoReader)
		if err != nil {
			return err
		}

		for _, key := range [][]byte{edgeInfo.NodeKey1Bytes[:],
			edgeInfo.NodeKey2Bytes[:]} {

			if err := checkKey(edgeInfo.ChannelID, key); err != nil {
				return err
			}
		}

		return nil
	})

	if err != nil {
		return fmt.Errorf("unable to update edge policies: %v", err)
	}

	log.Infof("Migration of edge policies complete!")

	return nil
}

// paymentStatusesMigration is a database migration intended for adding
// payment statuses for each existing payment entity in the bucket, in order
// to be able to control transitions of statuses and prevent cases such as
// double payment.
2018-11-30 07:04:21 +03:00
|
|
|
func paymentStatusesMigration(tx *bbolt.Tx) error {
|
2018-08-12 16:15:24 +03:00
|
|
|
// Get the bucket dedicated to storing statuses of payments,
|
2018-08-11 00:31:07 +03:00
|
|
|
// where a key is payment hash, value is payment status.
|
2018-08-12 16:15:24 +03:00
|
|
|
paymentStatuses, err := tx.CreateBucketIfNotExists(paymentStatusBucket)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-08-14 06:38:30 +03:00
|
|
|
log.Infof("Migrating database to support payment statuses")
|
|
|
|
|
|
|
|
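	// The "circuit-adds" bucket is maintained by the switch's circuit
	// map, which persists forwarded HTLCs. Circuits whose incoming short
	// channel ID is zero were initiated by this node itself, so those are
	// the payments that must be marked as in flight.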
	circuitAddKey := []byte("circuit-adds")
	circuits := tx.Bucket(circuitAddKey)
	if circuits != nil {
		log.Infof("Marking all known circuits with status InFlight")

		err = circuits.ForEach(func(k, v []byte) error {
			// Parse the first 8 bytes as the short chan ID for the
			// circuit. We'll skip all short chan IDs that are not
			// locally initiated, which includes all non-zero short
			// chan ids.
			chanID := binary.BigEndian.Uint64(k[:8])
			if chanID != 0 {
				return nil
			}

			// The payment hash is the third item in the serialized
			// payment circuit. The first two items are an AddRef
			// (10 bytes) and the incoming circuit key (16 bytes).
			const payHashOffset = 10 + 16

			paymentHash := v[payHashOffset : payHashOffset+32]

			return paymentStatuses.Put(
				paymentHash[:], StatusInFlight.Bytes(),
			)
		})
		if err != nil {
			return err
		}
	}

	log.Infof("Marking all existing payments with status Completed")

	// Get the bucket dedicated to storing payments
	bucket := tx.Bucket(paymentBucket)
	if bucket == nil {
		return nil
	}

	// For each payment in the bucket, deserialize the payment and mark it
	// as completed.
	err = bucket.ForEach(func(k, v []byte) error {
		// Skip if it is a sub-bucket.
		if v == nil {
			return nil
		}

		r := bytes.NewReader(v)
		payment, err := deserializeOutgoingPayment(r)
		if err != nil {
			return err
		}

		// Calculate payment hash for current payment.
		paymentHash := sha256.Sum256(payment.PaymentPreimage[:])

		// Update status for current payment to completed. If it fails,
		// the migration is aborted and the payment bucket is returned
		// to its previous state.
		return paymentStatuses.Put(paymentHash[:], StatusSucceeded.Bytes())
	})
	if err != nil {
		return err
	}

	log.Infof("Migration of payment statuses complete!")

	return nil
}

// migratePruneEdgeUpdateIndex is a database migration that attempts to resolve
// some lingering bugs with regards to edge policies and their update index.
// Stale entries within the edge update index were not being properly pruned
// due to a miscalculation on the offset of an edge's policy last update. This
// migration also fixes the case where the public keys within edge policies
// were being serialized with an extra byte, causing an even greater error
// when attempting to perform the offset calculation described earlier.
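//
// The migration proceeds in two phases: every entry in the edge update index
// is first deleted, and each known policy is then re-serialized and
// re-inserted via updateEdgePolicy, which repopulates the index with
// correctly computed entries.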
func migratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
	// To begin the migration, we'll retrieve the update index bucket. If it
	// does not exist, we have nothing left to do so we can simply exit.
	edges := tx.Bucket(edgeBucket)
	if edges == nil {
		return nil
	}
	edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
	if edgeUpdateIndex == nil {
		return nil
	}

	// Retrieve some buckets that will be needed later on. These should
	// already exist given the assumption that the buckets above do as
	// well.
	edgeIndex, err := edges.CreateBucketIfNotExists(edgeIndexBucket)
	if err != nil {
		return fmt.Errorf("unable to create/fetch edge index "+
			"bucket: %v", err)
	}
	nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
	if err != nil {
		return fmt.Errorf("unable to make node bucket: %v", err)
	}

	log.Info("Migrating database to properly prune edge update index")

	// We'll need to properly prune all the outdated entries within the edge
	// update index. To do so, we'll gather all of the existing policies
	// within the graph to re-populate them later on.
	var edgeKeys [][]byte
	err = edges.ForEach(func(edgeKey, edgePolicyBytes []byte) error {
		// All valid entries are indexed by a public key (33 bytes)
		// followed by a channel ID (8 bytes), so we'll skip any entries
		// with keys that do not match this.
		if len(edgeKey) != 33+8 {
			return nil
		}

		edgeKeys = append(edgeKeys, edgeKey)

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to gather existing edge policies: %v",
			err)
	}

	log.Info("Constructing set of edge update entries to purge.")

	// Build the set of keys that we will remove from the edge update index.
	// This will include all keys contained within the bucket.
	var updateKeysToRemove [][]byte
	err = edgeUpdateIndex.ForEach(func(updKey, _ []byte) error {
		updateKeysToRemove = append(updateKeysToRemove, updKey)
		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to gather existing edge updates: %v",
			err)
	}

	log.Infof("Removing %d entries from edge update index.",
		len(updateKeysToRemove))

	// With the set of keys contained in the edge update index constructed,
	// we'll proceed in purging all of them from the index.
	for _, updKey := range updateKeysToRemove {
		if err := edgeUpdateIndex.Delete(updKey); err != nil {
			return err
		}
	}

	log.Infof("Repopulating edge update index with %d valid entries.",
		len(edgeKeys))

	// For each edge key, we'll retrieve the policy, deserialize it, and
	// re-add it to the different buckets. By doing so, we'll ensure that
	// all existing edge policies are serialized correctly within their
	// respective buckets and that the correct entries are populated within
	// the edge update index.
	for _, edgeKey := range edgeKeys {
		edgePolicyBytes := edges.Get(edgeKey)

		// Skip any entries with unknown policies as there will not be
		// any entries for them in the edge update index.
		if bytes.Equal(edgePolicyBytes[:], unknownPolicy) {
			continue
		}

		edgePolicy, err := deserializeChanEdgePolicy(
			bytes.NewReader(edgePolicyBytes), nodes,
		)
		if err != nil {
			return err
		}

		_, err = updateEdgePolicy(tx, edgePolicy)
		if err != nil {
			return err
		}
	}

	log.Info("Migration to properly prune edge update index complete!")

	return nil
}

// migrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is
// indicated with boolean markers.
func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
	closedChanBucket := tx.Bucket(closedChannelBucket)
	if closedChanBucket == nil {
		return nil
	}

	log.Info("Migrating to new closed channel format...")
	err := closedChanBucket.ForEach(func(chanID, summary []byte) error {
		r := bytes.NewReader(summary)

		// Read the old (v6) format from the database.
		c, err := deserializeCloseChannelSummaryV6(r)
		if err != nil {
			return err
		}

		// Serialize using the new format, and put back into the
		// bucket.
		var b bytes.Buffer
		if err := serializeChannelCloseSummary(&b, c); err != nil {
			return err
		}

		return closedChanBucket.Put(chanID, b.Bytes())
	})
	if err != nil {
		return fmt.Errorf("unable to update closed channels: %v", err)
	}

	log.Info("Migration to new closed channel format complete!")

	return nil
}

var messageStoreBucket = []byte("message-store")

// migrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration the
// type of the message being stored.
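//
// Old key: peerPubKey (33 bytes) || shortChanID (8 bytes).
// New key: peerPubKey (33 bytes) || shortChanID (8 bytes) || msgType (2
// bytes, big endian).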
func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
	// We'll start by retrieving the bucket in which these messages are
	// stored. If there isn't one, there's nothing left for us to do so we
	// can avoid the migration.
	messageStore := tx.Bucket(messageStoreBucket)
	if messageStore == nil {
		return nil
	}

	log.Info("Migrating to the gossip message store new key format")

	// Otherwise we'll proceed with the migration. We'll start by coalescing
	// all the current messages within the store, which are indexed by the
	// public key of the peer which they should be sent to, followed by the
	// short channel ID of the channel to which the message belongs. We
	// should only expect to find channel announcement signatures as that
	// was the only supported message type previously.
	msgs := make(map[[33 + 8]byte]*lnwire.AnnounceSignatures)
	err := messageStore.ForEach(func(k, v []byte) error {
		var msgKey [33 + 8]byte
		copy(msgKey[:], k)

		msg := &lnwire.AnnounceSignatures{}
		if err := msg.Decode(bytes.NewReader(v), 0); err != nil {
			return err
		}

		msgs[msgKey] = msg

		return nil
	})
	if err != nil {
		return err
	}

	// Then, we'll go over all of our messages, remove their previous entry,
	// and add another with the new key format. Once we've done this for
	// every message, we can consider the migration complete.
	for oldMsgKey, msg := range msgs {
		if err := messageStore.Delete(oldMsgKey[:]); err != nil {
			return err
		}

		// Construct the new key under which we'll find this message
		// in the store. It'll be the same as the old, but we'll also
		// include the message type.
		var msgType [2]byte
		binary.BigEndian.PutUint16(msgType[:], uint16(msg.MsgType()))
		newMsgKey := append(oldMsgKey[:], msgType[:]...)

		// Serialize the message with its wire encoding.
		var b bytes.Buffer
		if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil {
			return err
		}

		if err := messageStore.Put(newMsgKey, b.Bytes()); err != nil {
			return err
		}
	}

	log.Info("Migration to the gossip message store new key format complete!")

	return nil
}

// migrateOutgoingPayments moves the OutgoingPayments into a new bucket format
// where they all reside in a top-level bucket indexed by the payment hash. In
// this sub-bucket we store information relevant to this payment, such as the
// payment status.
//
// Since the router cannot handle resumed payments that have the status
// InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure.
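//
// The resulting layout, per payment hash, is roughly:
//
//	paymentsRootBucket -> paymentHash ->
//		paymentSequenceKey | paymentCreationInfoKey |
//		paymentAttemptInfoKey | paymentSettleInfoKey
//
// and, for duplicate payments to the same hash:
//
//	paymentsRootBucket -> paymentHash -> paymentDuplicateBucket ->
//		seqNum -> (the same four keys)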
func migrateOutgoingPayments(tx *bbolt.Tx) error {
	oldPayments, err := tx.CreateBucketIfNotExists(paymentBucket)
	if err != nil {
		return err
	}

	newPayments, err := tx.CreateBucket(paymentsRootBucket)
	if err != nil {
		return err
	}

	// Get the source pubkey.
	nodes := tx.Bucket(nodeBucket)
	if nodes == nil {
		return ErrGraphNotFound
	}

	selfPub := nodes.Get(sourceKey)
	if selfPub == nil {
		return ErrSourceNodeNotSet
	}
	var sourcePubKey [33]byte
	copy(sourcePubKey[:], selfPub[:])

	log.Infof("Migrating outgoing payments to new bucket structure")

	err = oldPayments.ForEach(func(k, v []byte) error {
		// Skip if it is a sub-bucket.
		if v == nil {
			return nil
		}

		// Read the old payment format.
		r := bytes.NewReader(v)
		payment, err := deserializeOutgoingPayment(r)
		if err != nil {
			return err
		}

		// Calculate payment hash from the payment preimage.
		paymentHash := sha256.Sum256(payment.PaymentPreimage[:])

		// Now create and add a PaymentCreationInfo to the bucket.
		c := &PaymentCreationInfo{
			PaymentHash:    paymentHash,
			Value:          payment.Terms.Value,
			CreationDate:   payment.CreationDate,
			PaymentRequest: payment.PaymentRequest,
		}

		var infoBuf bytes.Buffer
		if err := serializePaymentCreationInfo(&infoBuf, c); err != nil {
			return err
		}

		// Do the same for the PaymentAttemptInfo.
		totalAmt := payment.Terms.Value + payment.Fee
		rt := route.Route{
			TotalTimeLock: payment.TimeLockLength,
			TotalAmount:   totalAmt,
			SourcePubKey:  sourcePubKey,
			Hops:          []*route.Hop{},
		}
		for _, hop := range payment.Path {
			rt.Hops = append(rt.Hops, &route.Hop{
				PubKeyBytes:  hop,
				AmtToForward: totalAmt,
			})
		}

		// Since the old format didn't store the fee for individual
		// hops, we let the last hop eat the whole fee for the total to
		// add up.
		if len(rt.Hops) > 0 {
			rt.Hops[len(rt.Hops)-1].AmtToForward = payment.Terms.Value
		}
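
		// For example, with a value of 1000 and a fee of 10, every
		// hop is recorded as forwarding 1010 except the final hop,
		// which forwards 1000; the difference of 10 is the fee
		// consumed along the route.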

		// Since we don't have the session key for old payments, we
		// create a random one to be able to serialize the attempt
		// info.
		priv, _ := btcec.NewPrivateKey(btcec.S256())
		s := &PaymentAttemptInfo{
			PaymentID:  0,    // unknown.
			SessionKey: priv, // unknown.
			Route:      rt,
		}

		var attemptBuf bytes.Buffer
		if err := serializePaymentAttemptInfo(&attemptBuf, s); err != nil {
			return err
		}

		// Reuse the existing payment sequence number.
		var seqNum [8]byte
		copy(seqNum[:], k)

		// Create a bucket indexed by the payment hash.
		bucket, err := newPayments.CreateBucket(paymentHash[:])

		// If the bucket already exists, it means that we are migrating
		// from a database containing duplicate payments to a payment
		// hash. To keep this information, we store such duplicate
		// payments in a sub-bucket.
		if err == bbolt.ErrBucketExists {
			pHashBucket := newPayments.Bucket(paymentHash[:])

			// Create a bucket for duplicate payments within this
			// payment hash's bucket.
			dup, err := pHashBucket.CreateBucketIfNotExists(
				paymentDuplicateBucket,
			)
			if err != nil {
				return err
			}

			// Each duplicate will get its own sub-bucket within
			// this bucket, so use their sequence number to index
			// them by.
			bucket, err = dup.CreateBucket(seqNum[:])
			if err != nil {
				return err
			}

		} else if err != nil {
			return err
		}

		// Store the payment's information to the bucket.
		err = bucket.Put(paymentSequenceKey, seqNum[:])
		if err != nil {
			return err
		}

		err = bucket.Put(paymentCreationInfoKey, infoBuf.Bytes())
		if err != nil {
			return err
		}

		err = bucket.Put(paymentAttemptInfoKey, attemptBuf.Bytes())
		if err != nil {
			return err
		}

		err = bucket.Put(paymentSettleInfoKey, payment.PaymentPreimage[:])
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	// To continue producing unique sequence numbers, we set the sequence
	// of the new bucket to that of the old one.
	seq := oldPayments.Sequence()
	if err := newPayments.SetSequence(seq); err != nil {
		return err
	}

	// Now we delete the old buckets. Deleting the payment status buckets
	// deletes all payment statuses other than Complete.
	err = tx.DeleteBucket(paymentStatusBucket)
	if err != nil && err != bbolt.ErrBucketNotFound {
		return err
	}

	// Finally delete the old payment bucket.
	err = tx.DeleteBucket(paymentBucket)
	if err != nil && err != bbolt.ErrBucketNotFound {
		return err
	}

	log.Infof("Migration of outgoing payment bucket structure completed!")

	return nil
}