Merge pull request #3633 from joostjager/safe-migrations

channeldb: isolate migrations
Olaoluwa Osuntokun 2019-11-01 18:28:56 -07:00 committed by GitHub
commit 38e313a869
27 changed files with 4731 additions and 176 deletions

@@ -10,6 +10,9 @@ run:
skip-files:
- "mobile\\/.*generated\\.go"
skip-dirs:
- channeldb/migration_01_to_11
build-tags:
- autopilotrpc
- chainrpc

@@ -13,6 +13,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
"github.com/lightningnetwork/lnd/lnwire"
)
@@ -47,19 +48,19 @@ var (
// for the update time of node and channel updates were
// added.
number: 1,
migration: migrateNodeAndEdgeUpdateIndex,
migration: migration_01_to_11.MigrateNodeAndEdgeUpdateIndex,
},
{
// The DB version that added the invoice event time
// series.
number: 2,
migration: migrateInvoiceTimeSeries,
migration: migration_01_to_11.MigrateInvoiceTimeSeries,
},
{
// The DB version that updated the embedded invoice in
// outgoing payments to match the new format.
number: 3,
migration: migrateInvoiceTimeSeriesOutgoingPayments,
migration: migration_01_to_11.MigrateInvoiceTimeSeriesOutgoingPayments,
},
{
// The version of the database where every channel
@@ -67,53 +68,53 @@ var (
// a policy is unknown, this will be represented
// by a special byte sequence.
number: 4,
migration: migrateEdgePolicies,
migration: migration_01_to_11.MigrateEdgePolicies,
},
{
// The DB version where we persist each attempt to send
// an HTLC to a payment hash, and track whether the
// payment is in-flight, succeeded, or failed.
number: 5,
migration: paymentStatusesMigration,
migration: migration_01_to_11.PaymentStatusesMigration,
},
{
// The DB version that properly prunes stale entries
// from the edge update index.
number: 6,
migration: migratePruneEdgeUpdateIndex,
migration: migration_01_to_11.MigratePruneEdgeUpdateIndex,
},
{
// The DB version that migrates the ChannelCloseSummary
// to a format where optional fields are indicated with
// boolean flags.
number: 7,
migration: migrateOptionalChannelCloseSummaryFields,
migration: migration_01_to_11.MigrateOptionalChannelCloseSummaryFields,
},
{
// The DB version that changes the gossiper's message
// store keys to account for the message's type and
// ShortChannelID.
number: 8,
migration: migrateGossipMessageStoreKeys,
migration: migration_01_to_11.MigrateGossipMessageStoreKeys,
},
{
// The DB version where the payments and payment
// statuses are moved to being stored in a combined
// bucket.
number: 9,
migration: migrateOutgoingPayments,
migration: migration_01_to_11.MigrateOutgoingPayments,
},
{
// The DB version where we started to store legacy
// payload information for all routes, as well as the
// optional TLV records.
number: 10,
migration: migrateRouteSerialization,
migration: migration_01_to_11.MigrateRouteSerialization,
},
{
// Add invoice htlc and cltv delta fields.
number: 11,
migration: migrateInvoices,
migration: migration_01_to_11.MigrateInvoices,
},
}
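The table above wires each database version to an exported migration function from the new migration_01_to_11 package. As a rough sketch of how such a version table is typically consumed — the version struct fields are taken from the hunk, but the surrounding sync logic here is an illustration, not lnd's actual code — each migration newer than the stored version runs inside a single bbolt transaction:

package sketch

import "github.com/coreos/bbolt"

// version mirrors the entries shown above: a DB version number paired
// with the migration that produces it.
type version struct {
	number    uint32
	migration func(tx *bbolt.Tx) error
}

// applyPendingMigrations runs every migration newer than current inside
// one transaction, so a failed migration rolls back atomically.
func applyPendingMigrations(db *bbolt.DB, versions []version,
	current uint32) (uint32, error) {

	err := db.Update(func(tx *bbolt.Tx) error {
		for _, v := range versions {
			if v.number <= current {
				continue // already applied
			}
			if err := v.migration(tx); err != nil {
				return err
			}
			current = v.number
		}
		return nil
	})
	return current, err
}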
@@ -266,10 +267,6 @@ func createChannelDB(dbPath string) error {
return err
}
if _, err := tx.CreateBucket(paymentBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(nodeInfoBucket); err != nil {
return err
}

@@ -3,6 +3,7 @@ package channeldb
import (
"github.com/btcsuite/btclog"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
)
// log is a logger that is initialized with no output filters. This
@@ -25,4 +26,5 @@ func DisableLog() {
// using btclog.
func UseLogger(logger btclog.Logger) {
log = logger
migration_01_to_11.UseLogger(logger)
}
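Since UseLogger now forwards the logger into the migration sub-package, a single call wires up logging for both. A minimal usage sketch with btclog:

package main

import (
	"os"

	"github.com/btcsuite/btclog"
	"github.com/lightningnetwork/lnd/channeldb"
)

func main() {
	// One logger handed to channeldb now also covers log output from
	// migration_01_to_11.
	backend := btclog.NewBackend(os.Stdout)
	channeldb.UseLogger(backend.Logger("CHDB"))
}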

@@ -0,0 +1,221 @@
package migration_01_to_11
import (
"encoding/binary"
"errors"
"fmt"
"io"
"net"
"github.com/lightningnetwork/lnd/tor"
)
// addressType specifies the network protocol and version that should be used
// when connecting to a node at a particular address.
type addressType uint8
const (
// tcp4Addr denotes an IPv4 TCP address.
tcp4Addr addressType = 0
// tcp6Addr denotes an IPv6 TCP address.
tcp6Addr addressType = 1
// v2OnionAddr denotes a version 2 Tor onion service address.
v2OnionAddr addressType = 2
// v3OnionAddr denotes a version 3 Tor (prop224) onion service address.
v3OnionAddr addressType = 3
)
// encodeTCPAddr serializes a TCP address into its compact raw bytes
// representation.
func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) error {
var (
addrType byte
ip []byte
)
if addr.IP.To4() != nil {
addrType = byte(tcp4Addr)
ip = addr.IP.To4()
} else {
addrType = byte(tcp6Addr)
ip = addr.IP.To16()
}
if ip == nil {
return fmt.Errorf("unable to encode IP %v", addr.IP)
}
if _, err := w.Write([]byte{addrType}); err != nil {
return err
}
if _, err := w.Write(ip); err != nil {
return err
}
var port [2]byte
byteOrder.PutUint16(port[:], uint16(addr.Port))
if _, err := w.Write(port[:]); err != nil {
return err
}
return nil
}
// encodeOnionAddr serializes an onion address into its compact raw bytes
// representation.
func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) error {
var suffixIndex int
hostLen := len(addr.OnionService)
switch hostLen {
case tor.V2Len:
if _, err := w.Write([]byte{byte(v2OnionAddr)}); err != nil {
return err
}
suffixIndex = tor.V2Len - tor.OnionSuffixLen
case tor.V3Len:
if _, err := w.Write([]byte{byte(v3OnionAddr)}); err != nil {
return err
}
suffixIndex = tor.V3Len - tor.OnionSuffixLen
default:
return errors.New("unknown onion service length")
}
suffix := addr.OnionService[suffixIndex:]
if suffix != tor.OnionSuffix {
return fmt.Errorf("invalid suffix \"%v\"", suffix)
}
host, err := tor.Base32Encoding.DecodeString(
addr.OnionService[:suffixIndex],
)
if err != nil {
return err
}
// Sanity check the decoded length.
switch {
case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen:
return fmt.Errorf("onion service %v decoded to invalid host %x",
addr.OnionService, host)
case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen:
return fmt.Errorf("onion service %v decoded to invalid host %x",
addr.OnionService, host)
}
if _, err := w.Write(host); err != nil {
return err
}
var port [2]byte
byteOrder.PutUint16(port[:], uint16(addr.Port))
if _, err := w.Write(port[:]); err != nil {
return err
}
return nil
}
// deserializeAddr reads the serialized raw representation of an address and
// deserializes it into the actual address. This allows us to avoid address
// resolution within the channeldb package.
func deserializeAddr(r io.Reader) (net.Addr, error) {
var addrType [1]byte
if _, err := r.Read(addrType[:]); err != nil {
return nil, err
}
var address net.Addr
switch addressType(addrType[0]) {
case tcp4Addr:
var ip [4]byte
if _, err := r.Read(ip[:]); err != nil {
return nil, err
}
var port [2]byte
if _, err := r.Read(port[:]); err != nil {
return nil, err
}
address = &net.TCPAddr{
IP: net.IP(ip[:]),
Port: int(binary.BigEndian.Uint16(port[:])),
}
case tcp6Addr:
var ip [16]byte
if _, err := r.Read(ip[:]); err != nil {
return nil, err
}
var port [2]byte
if _, err := r.Read(port[:]); err != nil {
return nil, err
}
address = &net.TCPAddr{
IP: net.IP(ip[:]),
Port: int(binary.BigEndian.Uint16(port[:])),
}
case v2OnionAddr:
var h [tor.V2DecodedLen]byte
if _, err := r.Read(h[:]); err != nil {
return nil, err
}
var p [2]byte
if _, err := r.Read(p[:]); err != nil {
return nil, err
}
onionService := tor.Base32Encoding.EncodeToString(h[:])
onionService += tor.OnionSuffix
port := int(binary.BigEndian.Uint16(p[:]))
address = &tor.OnionAddr{
OnionService: onionService,
Port: port,
}
case v3OnionAddr:
var h [tor.V3DecodedLen]byte
if _, err := r.Read(h[:]); err != nil {
return nil, err
}
var p [2]byte
if _, err := r.Read(p[:]); err != nil {
return nil, err
}
onionService := tor.Base32Encoding.EncodeToString(h[:])
onionService += tor.OnionSuffix
port := int(binary.BigEndian.Uint16(p[:]))
address = &tor.OnionAddr{
OnionService: onionService,
Port: port,
}
default:
return nil, ErrUnknownAddressType
}
return address, nil
}
// serializeAddr serializes an address into its raw bytes representation so that
// it can be deserialized without requiring address resolution.
func serializeAddr(w io.Writer, address net.Addr) error {
switch addr := address.(type) {
case *net.TCPAddr:
return encodeTCPAddr(w, addr)
case *tor.OnionAddr:
return encodeOnionAddr(w, addr)
default:
return ErrUnknownAddressType
}
}
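Taken together, the wire format for a TCP address is one type byte, the raw 4- or 16-byte IP, and a 2-byte big-endian port. A test-style round trip, written as if it lived in this package (the helpers are unexported; assumes imports of bytes, fmt, and net):

func ExampleSerializeAddr() {
	var buf bytes.Buffer
	addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 9735}
	if err := serializeAddr(&buf, addr); err != nil {
		panic(err)
	}
	// 1 type byte + 4-byte IPv4 + 2-byte port = 7 bytes.
	fmt.Println(buf.Len())

	decoded, err := deserializeAddr(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)
	// Output:
	// 7
	// 127.0.0.1:9735
}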

@@ -0,0 +1,751 @@
package migration_01_to_11
import (
"errors"
"fmt"
"io"
"strconv"
"strings"
"sync"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
)
var (
// closedChannelBucket stores summarization information concerning
// previously open, but now closed channels.
closedChannelBucket = []byte("closed-chan-bucket")
// openChanBucket stores all the currently open channels. This bucket
// has a second, nested bucket which is keyed by a node's ID. Within
// that node ID bucket, all attributes required to track, update, and
// close a channel are stored.
//
// openChan -> nodeID -> chanPoint
//
// TODO(roasbeef): flesh out comment
openChannelBucket = []byte("open-chan-bucket")
)
// ChannelType is an enum-like type that describes one of several possible
// channel types. Each open channel is associated with a particular type as the
// channel type may determine how higher level operations are conducted such as
// fee negotiation, channel closing, the format of HTLCs, etc.
// TODO(roasbeef): split up per-chain?
type ChannelType uint8
const (
// NOTE: iota isn't used here, as this enum needs to be stable
// long-term since it will be persisted to the database.
// SingleFunder represents a channel wherein one party solely funds the
// entire capacity of the channel.
SingleFunder ChannelType = 0
)
// ChannelConstraints represents a set of constraints meant to allow a node to
// limit their exposure, enact flow control and ensure that all HTLCs are
// economically relevant. This struct will be mirrored for both sides of the
// channel, as each side will enforce various constraints that MUST be adhered
// to for the lifetime of the channel. The parameters for each of these
// constraints are static for the duration of the channel, meaning the channel
// must be torn down for them to change.
type ChannelConstraints struct {
// DustLimit is the threshold (in satoshis) below which any outputs
// should be trimmed. When an output is trimmed, it isn't materialized
// as an actual output, but is instead burned to miner's fees.
DustLimit btcutil.Amount
// ChanReserve is an absolute reservation on the channel for the
// owner of this set of constraints. This means that the current
// settled balance for this node CANNOT dip below the reservation
// amount. This acts as a defense against costless attacks when
// either side no longer has any skin in the game.
ChanReserve btcutil.Amount
// MaxPendingAmount is the maximum pending HTLC value that the
// owner of these constraints can offer the remote node at a
// particular time.
MaxPendingAmount lnwire.MilliSatoshi
// MinHTLC is the minimum HTLC value that the owner of these
// constraints can offer the remote node. If any HTLCs below this
// amount are offered, then the HTLC will be rejected. This, in
// tandem with the dust limit allows a node to regulate the
// smallest HTLC that it deems economically relevant.
MinHTLC lnwire.MilliSatoshi
// MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of
// this set of constraints can offer the remote node. This allows each
// node to limit their overall exposure to HTLCs that may need to be
// acted upon in the case of a unilateral channel closure or a contract
// breach.
MaxAcceptedHtlcs uint16
// CsvDelay is the relative time lock delay expressed in blocks. Any
// settled outputs that pay to the owner of this channel configuration
// MUST ensure that the delay branch uses this value as the relative
// time lock. Similarly, any HTLCs offered by this node should use
// this value as well.
CsvDelay uint16
}
// ChannelConfig is a struct that houses the various configuration options for
// channels. Each side maintains an instance of this configuration as it
// governs: how the funding and commitment transactions are to be created, the
// nature of HTLCs allotted, the keys to be used for delivery, and relative
// time lock parameters.
type ChannelConfig struct {
// ChannelConstraints is the set of constraints that must be upheld for
// the duration of the channel for the owner of this channel
// configuration. Constraints govern a number of flow control related
// parameters, also including the smallest HTLC that will be accepted
// by a participant.
ChannelConstraints
// MultiSigKey is the key to be used within the 2-of-2 output script
// for the owner of this channel config.
MultiSigKey keychain.KeyDescriptor
// RevocationBasePoint is the base public key to be used when deriving
// revocation keys for the remote node's commitment transaction. This
// will be combined along with a per commitment secret to derive a
// unique revocation key for each state.
RevocationBasePoint keychain.KeyDescriptor
// PaymentBasePoint is the base public key to be used when deriving
// the key used within the non-delayed pay-to-self output on the
// commitment transaction for a node. This will be combined with a
// tweak derived from the per-commitment point to ensure unique keys
// for each commitment transaction.
PaymentBasePoint keychain.KeyDescriptor
// DelayBasePoint is the base public key to be used when deriving the
// key used within the delayed pay-to-self output on the commitment
// transaction for a node. This will be combined with a tweak derived
// from the per-commitment point to ensure unique keys for each
// commitment transaction.
DelayBasePoint keychain.KeyDescriptor
// HtlcBasePoint is the base public key to be used when deriving the
// local HTLC key. The derived key (combined with the tweak derived
// from the per-commitment point) is used within the "to self" clause
// within any HTLC output scripts.
HtlcBasePoint keychain.KeyDescriptor
}
// ChannelCommitment is a snapshot of the commitment state at a particular
// point in the commitment chain. With each state transition, a snapshot of the
// current state along with all non-settled HTLCs are recorded. These snapshots
// detail the state of the _remote_ party's commitment at a particular state
// number. For ourselves (the local node) we ONLY store our most recent
// (unrevoked) state for safety purposes.
type ChannelCommitment struct {
// CommitHeight is the update number of this ChannelDelta, representing
// the total number of commitment updates to this point. This can be
// viewed as sort of a "commitment height" as this number is
// monotonically increasing.
CommitHeight uint64
// LocalLogIndex is the cumulative log index of the local node at
// this point in the commitment chain. This value will be incremented
// for each _update_ added to the local update log.
LocalLogIndex uint64
// LocalHtlcIndex is the current local running HTLC index. This value
// will be incremented for each outgoing HTLC the local node offers.
LocalHtlcIndex uint64
// RemoteLogIndex is the cumulative log index of the remote node
// at this point in the commitment chain. This value will be
// incremented for each _update_ added to the remote update log.
RemoteLogIndex uint64
// RemoteHtlcIndex is the current remote running HTLC index. This value
// will be incremented for each outgoing HTLC the remote node offers.
RemoteHtlcIndex uint64
// LocalBalance is the current available settled balance within the
// channel directly spendable by us.
LocalBalance lnwire.MilliSatoshi
// RemoteBalance is the current available settled balance within the
// channel directly spendable by the remote node.
RemoteBalance lnwire.MilliSatoshi
// CommitFee is the amount calculated to be paid in fees for the
// current set of commitment transactions. The fee amount is persisted
// with the channel in order to allow the fee amount to be removed and
// recalculated with each channel state update, including updates that
// happen after a system restart.
CommitFee btcutil.Amount
// FeePerKw is the min satoshis/kilo-weight that should be paid within
// the commitment transaction for the entire duration of the channel's
// lifetime. This field may be updated during normal operation of the
// channel as on-chain conditions change.
//
// TODO(halseth): make this SatPerKWeight. Cannot be done atm because
// this will cause the import cycle lnwallet<->channeldb. Fee
// estimation stuff should be in its own package.
FeePerKw btcutil.Amount
// CommitTx is the latest version of the commitment state, broadcastable
// by us.
CommitTx *wire.MsgTx
// CommitSig is one half of the signature required to fully complete
// the script for the commitment transaction above. This is the
// signature signed by the remote party for our version of the
// commitment transactions.
CommitSig []byte
// Htlcs is the set of HTLCs that are pending at this particular
// commitment height.
Htlcs []HTLC
// TODO(roasbeef): pending commit pointer?
// * lets just walk through
}
// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in
// the default usable state, or a state where it shouldn't be used.
type ChannelStatus uint8
var (
// ChanStatusDefault is the normal state of an open channel.
ChanStatusDefault ChannelStatus
// ChanStatusBorked indicates that the channel has entered an
// irreconcilable state, triggered by a state desynchronization or
// channel breach. Channels in this state should never be added to the
// htlc switch.
ChanStatusBorked ChannelStatus = 1
// ChanStatusCommitBroadcasted indicates that a commitment for this
// channel has been broadcasted.
ChanStatusCommitBroadcasted ChannelStatus = 1 << 1
// ChanStatusLocalDataLoss indicates that we have lost channel state
// for this channel, and broadcasting our latest commitment might be
// considered a breach.
//
// TODO(halseth): actually enforce that we are not force closing such a
// channel.
ChanStatusLocalDataLoss ChannelStatus = 1 << 2
// ChanStatusRestored is a status flag that signals that the channel
// has been restored, and doesn't have all the fields a typical channel
// will have.
ChanStatusRestored ChannelStatus = 1 << 3
)
// chanStatusStrings maps a ChannelStatus to a human friendly string that
// describes that status.
var chanStatusStrings = map[ChannelStatus]string{
ChanStatusDefault: "ChanStatusDefault",
ChanStatusBorked: "ChanStatusBorked",
ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted",
ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss",
ChanStatusRestored: "ChanStatusRestored",
}
// orderedChanStatusFlags is an in-order list of all the channel status flags.
var orderedChanStatusFlags = []ChannelStatus{
ChanStatusDefault,
ChanStatusBorked,
ChanStatusCommitBroadcasted,
ChanStatusLocalDataLoss,
ChanStatusRestored,
}
// String returns a human-readable representation of the ChannelStatus.
func (c ChannelStatus) String() string {
// If no flags are set, then this is the default case.
if c == 0 {
return chanStatusStrings[ChanStatusDefault]
}
// Add individual bit flags.
statusStr := ""
for _, flag := range orderedChanStatusFlags {
if c&flag == flag {
statusStr += chanStatusStrings[flag] + "|"
c -= flag
}
}
// Remove anything to the right of the final bar, including it as well.
statusStr = strings.TrimRight(statusStr, "|")
// Add any remaining flags which aren't accounted for as hex.
if c != 0 {
statusStr += "|0x" + strconv.FormatUint(uint64(c), 16)
}
// If this was purely an unknown flag, then remove the extra bar at the
// start of the string.
statusStr = strings.TrimLeft(statusStr, "|")
return statusStr
}
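An illustrative in-package example (assumes fmt): combined flags render joined by bars. Note that because the zero-valued ChanStatusDefault is included in orderedChanStatusFlags, the check c&flag == flag is trivially true for it, so with the table as shown it prefixes every non-default status as well:

func ExampleChannelStatus_String() {
	status := ChanStatusBorked | ChanStatusCommitBroadcasted
	fmt.Println(status)
	// Output: ChanStatusDefault|ChanStatusBorked|ChanStatusCommitBroadcasted
}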
// OpenChannel encapsulates the persistent and dynamic state of an open channel
// with a remote node. An open channel supports several options for on-disk
// serialization depending on the exact context. Full (upon channel creation)
// state commitments, and partial (due to a commitment update) writes are
// supported. Each partial write due to a state update appends the new update
// to an on-disk log, which can then subsequently be queried in order to
// "time-travel" to a prior state.
type OpenChannel struct {
// ChanType denotes which type of channel this is.
ChanType ChannelType
// ChainHash is a hash which represents the blockchain that this
// channel will be opened within. This value is typically the genesis
// hash. In the case that the original chain went through a contentious
// hard-fork, then this value will be tweaked using the unique fork
// point on each branch.
ChainHash chainhash.Hash
// FundingOutpoint is the outpoint of the final funding transaction.
// This value uniquely and globally identifies the channel within the
// target blockchain as specified by the chain hash parameter.
FundingOutpoint wire.OutPoint
// ShortChannelID encodes the exact location in the chain in which the
// channel was initially confirmed. This includes: the block height,
// transaction index, and the output within the target transaction.
ShortChannelID lnwire.ShortChannelID
// IsPending indicates whether a channel's funding transaction has been
// confirmed.
IsPending bool
// IsInitiator is a bool which indicates if we were the original
// initiator for the channel. This value may affect how higher levels
// negotiate fees, or close the channel.
IsInitiator bool
// FundingBroadcastHeight is the height at which the funding
// transaction was broadcast. This value can be used by higher level
// sub-systems to determine if a channel is stale and/or should have
// been confirmed before a certain height.
FundingBroadcastHeight uint32
// NumConfsRequired is the number of confirmations a channel's funding
// transaction must have received in order to be considered available
// for normal transactional use.
NumConfsRequired uint16
// ChannelFlags holds the flags that were sent as part of the
// open_channel message.
ChannelFlags lnwire.FundingFlag
// IdentityPub is the identity public key of the remote node this
// channel has been established with.
IdentityPub *btcec.PublicKey
// Capacity is the total capacity of this channel.
Capacity btcutil.Amount
// TotalMSatSent is the total number of milli-satoshis we've sent
// within this channel.
TotalMSatSent lnwire.MilliSatoshi
// TotalMSatReceived is the total number of milli-satoshis we've
// received within this channel.
TotalMSatReceived lnwire.MilliSatoshi
// LocalChanCfg is the channel configuration for the local node.
LocalChanCfg ChannelConfig
// RemoteChanCfg is the channel configuration for the remote node.
RemoteChanCfg ChannelConfig
// LocalCommitment is the current local commitment state for the local
// party. This is stored distinct from the state of the remote party
// as there are certain asymmetric parameters which affect the
// structure of each commitment.
LocalCommitment ChannelCommitment
// RemoteCommitment is the current remote commitment state for the
// remote party. This is stored distinct from the state of the local
// party as there are certain asymmetric parameters which affect the
// structure of each commitment.
RemoteCommitment ChannelCommitment
// RemoteCurrentRevocation is the current revocation for their
// commitment transaction. However, since this is the derived public key,
// we don't yet have the private key so we aren't yet able to verify
// that it's actually in the hash chain.
RemoteCurrentRevocation *btcec.PublicKey
// RemoteNextRevocation is the revocation key to be used for the *next*
// commitment transaction we create for the local node. Within the
// specification, this value is referred to as the
// per-commitment-point.
RemoteNextRevocation *btcec.PublicKey
// RevocationProducer is used to generate the revocation in such a way
// that the remote side might store it efficiently and have the ability
// to restore the revocation by index if needed. The current
// implementation of the secret producer is the shachain producer.
RevocationProducer shachain.Producer
// RevocationStore is used to efficiently store the revocations for
// previous channel states sent to us by the remote side. The current
// implementation of the secret store is the shachain store.
RevocationStore shachain.Store
// FundingTxn is the transaction containing this channel's funding
// outpoint. Upon restarts, this txn will be rebroadcast if the channel
// is found to be pending.
//
// NOTE: This value will only be populated for single-funder channels
// for which we are the initiator.
FundingTxn *wire.MsgTx
// TODO(roasbeef): eww
Db *DB
// TODO(roasbeef): just need to store local and remote HTLC's?
sync.RWMutex
}
// ShortChanID returns the current ShortChannelID of this channel.
func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID {
c.RLock()
defer c.RUnlock()
return c.ShortChannelID
}
// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are
// contained within ChannelDeltas which encode the current state of the
// commitment between state updates.
//
// TODO(roasbeef): save space by using smaller ints at tail end?
type HTLC struct {
// Signature is the signature for the second level covenant transaction
// for this HTLC. The second level transaction is a timeout tx in the
// case that this is an outgoing HTLC, and a success tx in the case
// that this is an incoming HTLC.
//
// TODO(roasbeef): make [64]byte instead?
Signature []byte
// RHash is the payment hash of the HTLC.
RHash [32]byte
// Amt is the amount of milli-satoshis this HTLC escrows.
Amt lnwire.MilliSatoshi
// RefundTimeout is the absolute timeout on the HTLC that the sender
// must wait before reclaiming the funds in limbo.
RefundTimeout uint32
// OutputIndex is the output index for this particular HTLC output
// within the commitment transaction.
OutputIndex int32
// Incoming denotes whether we're the receiver or the sender of this
// HTLC.
Incoming bool
// OnionBlob is an opaque blob which is used to complete multi-hop
// routing.
OnionBlob []byte
// HtlcIndex is the HTLC counter index of this active, outstanding
// HTLC. This differs from the LogIndex, as the HtlcIndex is only
// incremented for each offered HTLC, while the LogIndex is
// incremented for each update (includes settle+fail).
HtlcIndex uint64
// LogIndex is the cumulative log index of this HTLC. This differs
// from the HtlcIndex as this will be incremented for each new log
// update added.
LogIndex uint64
}
// CircuitKey is used by a channel to uniquely identify the HTLCs it receives
// from the switch, and is used to purge our in-memory state of HTLCs that have
// already been processed by a link. Two lists of CircuitKeys are included in
// each CommitDiff to allow a link to determine which in-memory htlcs directed
// the opening and closing of circuits in the switch's circuit map.
type CircuitKey struct {
// ChanID is the short chanid indicating the HTLC's origin.
//
// NOTE: It is fine for this value to be blank, as this indicates a
// locally-sourced payment.
ChanID lnwire.ShortChannelID
// HtlcID is the unique htlc index predominantly assigned by links,
// though it can also be assigned by the switch in the case of
// locally-sourced payments.
HtlcID uint64
}
// String returns a string representation of the CircuitKey.
func (k CircuitKey) String() string {
return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID)
}
// ClosureType is an enum like structure that details exactly _how_ a channel
// was closed. Five closure types are currently possible: none, cooperative,
// local force close, remote force close, and (remote) breach.
type ClosureType uint8
const (
// RemoteForceClose indicates that the remote peer has unilaterally
// broadcast their current commitment state on-chain.
RemoteForceClose ClosureType = 4
)
// ChannelCloseSummary contains the final state of a channel at the point it
// was closed. Once a channel is closed, all the information pertaining to that
// channel within the openChannelBucket is deleted, and a compact summary is
// put in place instead.
type ChannelCloseSummary struct {
// ChanPoint is the outpoint for this channel's funding transaction,
// and is used as a unique identifier for the channel.
ChanPoint wire.OutPoint
// ShortChanID encodes the exact location in the chain in which the
// channel was initially confirmed. This includes: the block height,
// transaction index, and the output within the target transaction.
ShortChanID lnwire.ShortChannelID
// ChainHash is the hash of the genesis block that this channel resides
// within.
ChainHash chainhash.Hash
// ClosingTXID is the txid of the transaction which ultimately closed
// this channel.
ClosingTXID chainhash.Hash
// RemotePub is the public key of the remote peer that we formerly had
// a channel with.
RemotePub *btcec.PublicKey
// Capacity was the total capacity of the channel.
Capacity btcutil.Amount
// CloseHeight is the height at which the funding transaction was
// spent.
CloseHeight uint32
// SettledBalance is our total settled balance at the time of
// channel closure. This _does not_ include the sum of any outputs that
// have been time-locked as a result of the unilateral channel closure.
SettledBalance btcutil.Amount
// TimeLockedBalance is the sum of all the time-locked outputs at the
// time of channel closure. If we triggered the force closure of this
// channel, then this value will be non-zero if our settled output is
// above the dust limit. If we were on the receiving side of a channel
// force closure, then this value will be non-zero if we had any
// outstanding outgoing HTLCs at the time of channel closure.
TimeLockedBalance btcutil.Amount
// CloseType details exactly _how_ the channel was closed. Five closure
// types are possible: cooperative, local force, remote force, breach
// and funding canceled.
CloseType ClosureType
// IsPending indicates whether this channel is in the 'pending close'
// state, which means the channel closing transaction has been
// confirmed, but not yet been fully resolved. In the case of a channel
// that has been cooperatively closed, it will go straight into the
// fully resolved state as soon as the closing transaction has been
// confirmed. However, for channels that have been force closed, they'll
// stay marked as "pending" until _all_ the pending funds have been
// swept.
IsPending bool
// RemoteCurrentRevocation is the current revocation for their
// commitment transaction. However, since this is the derived public key,
// we don't yet have the private key so we aren't yet able to verify
// that it's actually in the hash chain.
RemoteCurrentRevocation *btcec.PublicKey
// RemoteNextRevocation is the revocation key to be used for the *next*
// commitment transaction we create for the local node. Within the
// specification, this value is referred to as the
// per-commitment-point.
RemoteNextRevocation *btcec.PublicKey
// LocalChanCfg is the channel configuration for the local node.
LocalChanConfig ChannelConfig
// LastChanSyncMsg is the ChannelReestablish message for this channel
// for the state at the point where it was closed.
LastChanSyncMsg *lnwire.ChannelReestablish
}
func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
err := WriteElements(w,
cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID,
cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance,
cs.TimeLockedBalance, cs.CloseType, cs.IsPending,
)
if err != nil {
return err
}
// If this is a close channel summary created before the addition of
// the new fields, then we can exit here.
if cs.RemoteCurrentRevocation == nil {
return WriteElements(w, false)
}
// If fields are present, write boolean to indicate this, and continue.
if err := WriteElements(w, true); err != nil {
return err
}
if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
return err
}
if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil {
return err
}
// The RemoteNextRevocation field is optional, as it's possible for a
// channel to be closed before we learn of the next unrevoked
// revocation point for the remote party. Write a boolean indicating
// whether this field is present or not.
if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
return err
}
// Write the field, if present.
if cs.RemoteNextRevocation != nil {
if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
return err
}
}
// Write whether the channel sync message is present.
if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
return err
}
// Write the channel sync message, if present.
if cs.LastChanSyncMsg != nil {
if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
return err
}
}
return nil
}
func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
c := &ChannelCloseSummary{}
err := ReadElements(r,
&c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
&c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
&c.TimeLockedBalance, &c.CloseType, &c.IsPending,
)
if err != nil {
return nil, err
}
// We'll now check to see if the channel close summary was encoded with
// any of the additional optional fields.
var hasNewFields bool
err = ReadElements(r, &hasNewFields)
if err != nil {
return nil, err
}
// If fields are not present, we can return.
if !hasNewFields {
return c, nil
}
// Otherwise read the new fields.
if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
return nil, err
}
if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
return nil, err
}
// Finally, we'll attempt to read the next unrevoked commitment point
// for the remote party. If we closed the channel before receiving a
// funding locked message then this might not be present. A boolean
// indicating whether the field is present will come first.
var hasRemoteNextRevocation bool
err = ReadElements(r, &hasRemoteNextRevocation)
if err != nil {
return nil, err
}
// If this field was written, read it.
if hasRemoteNextRevocation {
err = ReadElements(r, &c.RemoteNextRevocation)
if err != nil {
return nil, err
}
}
// Check if we have a channel sync message to read.
var hasChanSyncMsg bool
err = ReadElements(r, &hasChanSyncMsg)
if err == io.EOF {
return c, nil
} else if err != nil {
return nil, err
}
// If a chan sync message is present, read it.
if hasChanSyncMsg {
// We must pass in a reference to an lnwire.Message for the
// codec to support it.
var msg lnwire.Message
if err := ReadElements(r, &msg); err != nil {
return nil, err
}
chanSync, ok := msg.(*lnwire.ChannelReestablish)
if !ok {
return nil, errors.New("unable cast db Message to " +
"ChannelReestablish")
}
c.LastChanSyncMsg = chanSync
}
return c, nil
}
func writeChanConfig(b io.Writer, c *ChannelConfig) error {
return WriteElements(b,
c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC,
c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey,
c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint,
c.HtlcBasePoint,
)
}
func readChanConfig(b io.Reader, c *ChannelConfig) error {
return ReadElements(b,
&c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve,
&c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay,
&c.MultiSigKey, &c.RevocationBasePoint,
&c.PaymentBasePoint, &c.DelayBasePoint,
&c.HtlcBasePoint,
)
}
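The presence-flag scheme above keeps the encoding forward compatible: each optional field is preceded by a boolean, and the reader returns early at EOF for summaries written by older versions. A test-style round trip, as if in-package (assumes bytes):

func roundTripCloseSummary(cs *ChannelCloseSummary) (*ChannelCloseSummary, error) {
	var buf bytes.Buffer
	if err := serializeChannelCloseSummary(&buf, cs); err != nil {
		return nil, err
	}
	// Summaries written without the optional fields decode with
	// RemoteCurrentRevocation, RemoteNextRevocation and
	// LastChanSyncMsg left nil.
	return deserializeCloseChannelSummary(&buf)
}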

@@ -0,0 +1,221 @@
package migration_01_to_11
import (
"bytes"
"io/ioutil"
"math/rand"
"os"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
)
var (
key = [chainhash.HashSize]byte{
0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
}
rev = [chainhash.HashSize]byte{
0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
0x2d, 0xe7, 0x93, 0xe4,
}
testTx = &wire.MsgTx{
Version: 1,
TxIn: []*wire.TxIn{
{
PreviousOutPoint: wire.OutPoint{
Hash: chainhash.Hash{},
Index: 0xffffffff,
},
SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
Sequence: 0xffffffff,
},
},
TxOut: []*wire.TxOut{
{
Value: 5000000000,
PkScript: []byte{
0x41, // OP_DATA_65
0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
0xa6, // 65-byte signature
0xac, // OP_CHECKSIG
},
},
},
LockTime: 5,
}
privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:])
)
// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes.
func makeTestDB() (*DB, func(), error) {
// First, create a temporary directory to be used for the duration of
// this test.
tempDirName, err := ioutil.TempDir("", "channeldb")
if err != nil {
return nil, nil, err
}
// Next, create channeldb for the first time.
cdb, err := Open(tempDirName)
if err != nil {
return nil, nil, err
}
cleanUp := func() {
cdb.Close()
os.RemoveAll(tempDirName)
}
return cdb, cleanUp, nil
}
func createTestChannelState(cdb *DB) (*OpenChannel, error) {
// Advance the revocation producer and store by a single entry to
// simulate channel activity.
producer, err := shachain.NewRevocationProducerFromBytes(key[:])
if err != nil {
return nil, err
}
store := shachain.NewRevocationStore()
for i := 0; i < 1; i++ {
preImage, err := producer.AtIndex(uint64(i))
if err != nil {
return nil, err
}
if err := store.AddNextEntry(preImage); err != nil {
return nil, err
}
}
localCfg := ChannelConfig{
ChannelConstraints: ChannelConstraints{
DustLimit: btcutil.Amount(rand.Int63()),
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
ChanReserve: btcutil.Amount(rand.Int63()),
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(rand.Int31()),
},
MultiSigKey: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
},
RevocationBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
},
PaymentBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
},
DelayBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
},
HtlcBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
},
}
remoteCfg := ChannelConfig{
ChannelConstraints: ChannelConstraints{
DustLimit: btcutil.Amount(rand.Int63()),
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
ChanReserve: btcutil.Amount(rand.Int63()),
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
MaxAcceptedHtlcs: uint16(rand.Int31()),
CsvDelay: uint16(rand.Int31()),
},
MultiSigKey: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyMultiSig,
Index: 9,
},
},
RevocationBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyRevocationBase,
Index: 8,
},
},
PaymentBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyPaymentBase,
Index: 7,
},
},
DelayBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyDelayBase,
Index: 6,
},
},
HtlcBasePoint: keychain.KeyDescriptor{
PubKey: privKey.PubKey(),
KeyLocator: keychain.KeyLocator{
Family: keychain.KeyFamilyHtlcBase,
Index: 5,
},
},
}
chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63()))
return &OpenChannel{
ChanType: SingleFunder,
ChainHash: key,
FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()},
ShortChannelID: chanID,
IsInitiator: true,
IsPending: true,
IdentityPub: pubKey,
Capacity: btcutil.Amount(10000),
LocalChanCfg: localCfg,
RemoteChanCfg: remoteCfg,
TotalMSatSent: 8,
TotalMSatReceived: 2,
LocalCommitment: ChannelCommitment{
CommitHeight: 0,
LocalBalance: lnwire.MilliSatoshi(9000),
RemoteBalance: lnwire.MilliSatoshi(3000),
CommitFee: btcutil.Amount(rand.Int63()),
FeePerKw: btcutil.Amount(5000),
CommitTx: testTx,
CommitSig: bytes.Repeat([]byte{1}, 71),
},
RemoteCommitment: ChannelCommitment{
CommitHeight: 0,
LocalBalance: lnwire.MilliSatoshi(3000),
RemoteBalance: lnwire.MilliSatoshi(9000),
CommitFee: btcutil.Amount(rand.Int63()),
FeePerKw: btcutil.Amount(5000),
CommitTx: testTx,
CommitSig: bytes.Repeat([]byte{1}, 71),
},
NumConfsRequired: 4,
RemoteCurrentRevocation: privKey.PubKey(),
RemoteNextRevocation: privKey.PubKey(),
RevocationProducer: producer,
RevocationStore: store,
Db: cdb,
FundingTxn: testTx,
}, nil
}

@@ -0,0 +1,448 @@
package migration_01_to_11
import (
"encoding/binary"
"fmt"
"io"
"net"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/shachain"
)
// writeOutpoint writes an outpoint to the passed writer using the minimal
// amount of bytes possible.
func writeOutpoint(w io.Writer, o *wire.OutPoint) error {
if _, err := w.Write(o.Hash[:]); err != nil {
return err
}
if err := binary.Write(w, byteOrder, o.Index); err != nil {
return err
}
return nil
}
// readOutpoint reads an outpoint from the passed reader that was previously
// written using the writeOutpoint struct.
func readOutpoint(r io.Reader, o *wire.OutPoint) error {
if _, err := io.ReadFull(r, o.Hash[:]); err != nil {
return err
}
if err := binary.Read(r, byteOrder, &o.Index); err != nil {
return err
}
return nil
}
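The outpoint codec yields a fixed 36-byte encoding: the 32-byte txid followed by the 4-byte big-endian output index. A quick in-package round trip (assumes bytes and fmt):

func ExampleWriteOutpoint() {
	var buf bytes.Buffer
	op := wire.OutPoint{Index: 7}
	if err := writeOutpoint(&buf, &op); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 36 = 32-byte hash + 4-byte index

	var decoded wire.OutPoint
	if err := readOutpoint(&buf, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded == op) // true
}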
// UnknownElementType is an error returned when the codec is unable to encode or
// decode a particular type.
type UnknownElementType struct {
method string
element interface{}
}
// Error returns the name of the method that encountered the error, as well as
// the type that was unsupported.
func (e UnknownElementType) Error() string {
return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element)
}
// WriteElement is a one-stop shop to write the big endian representation of
// any element which is to be serialized for storage on disk. The passed
// io.Writer should be backed by an appropriately sized byte slice, or be able
// to dynamically expand to accommodate additional data.
func WriteElement(w io.Writer, element interface{}) error {
switch e := element.(type) {
case keychain.KeyDescriptor:
if err := binary.Write(w, byteOrder, e.Family); err != nil {
return err
}
if err := binary.Write(w, byteOrder, e.Index); err != nil {
return err
}
if e.PubKey != nil {
if err := binary.Write(w, byteOrder, true); err != nil {
return fmt.Errorf("error writing serialized element: %s", err)
}
return WriteElement(w, e.PubKey)
}
return binary.Write(w, byteOrder, false)
case ChannelType:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case chainhash.Hash:
if _, err := w.Write(e[:]); err != nil {
return err
}
case wire.OutPoint:
return writeOutpoint(w, &e)
case lnwire.ShortChannelID:
if err := binary.Write(w, byteOrder, e.ToUint64()); err != nil {
return err
}
case lnwire.ChannelID:
if _, err := w.Write(e[:]); err != nil {
return err
}
case int64, uint64:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case uint32:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case int32:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case uint16:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case uint8:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case bool:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case btcutil.Amount:
if err := binary.Write(w, byteOrder, uint64(e)); err != nil {
return err
}
case lnwire.MilliSatoshi:
if err := binary.Write(w, byteOrder, uint64(e)); err != nil {
return err
}
case *btcec.PrivateKey:
b := e.Serialize()
if _, err := w.Write(b); err != nil {
return err
}
case *btcec.PublicKey:
b := e.SerializeCompressed()
if _, err := w.Write(b); err != nil {
return err
}
case shachain.Producer:
return e.Encode(w)
case shachain.Store:
return e.Encode(w)
case *wire.MsgTx:
return e.Serialize(w)
case [32]byte:
if _, err := w.Write(e[:]); err != nil {
return err
}
case []byte:
if err := wire.WriteVarBytes(w, 0, e); err != nil {
return err
}
case lnwire.Message:
if _, err := lnwire.WriteMessage(w, e, 0); err != nil {
return err
}
case ChannelStatus:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case ClosureType:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case lnwire.FundingFlag:
if err := binary.Write(w, byteOrder, e); err != nil {
return err
}
case net.Addr:
if err := serializeAddr(w, e); err != nil {
return err
}
case []net.Addr:
if err := WriteElement(w, uint32(len(e))); err != nil {
return err
}
for _, addr := range e {
if err := serializeAddr(w, addr); err != nil {
return err
}
}
default:
return UnknownElementType{"WriteElement", e}
}
return nil
}
// WriteElements writes each element in the elements slice to the passed
// io.Writer using WriteElement.
func WriteElements(w io.Writer, elements ...interface{}) error {
for _, element := range elements {
err := WriteElement(w, element)
if err != nil {
return err
}
}
return nil
}
// ReadElement is a one-stop utility function to deserialize any data
// structure encoded using the serialization format of the database.
func ReadElement(r io.Reader, element interface{}) error {
switch e := element.(type) {
case *keychain.KeyDescriptor:
if err := binary.Read(r, byteOrder, &e.Family); err != nil {
return err
}
if err := binary.Read(r, byteOrder, &e.Index); err != nil {
return err
}
var hasPubKey bool
if err := binary.Read(r, byteOrder, &hasPubKey); err != nil {
return err
}
if hasPubKey {
return ReadElement(r, &e.PubKey)
}
case *ChannelType:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *chainhash.Hash:
if _, err := io.ReadFull(r, e[:]); err != nil {
return err
}
case *wire.OutPoint:
return readOutpoint(r, e)
case *lnwire.ShortChannelID:
var a uint64
if err := binary.Read(r, byteOrder, &a); err != nil {
return err
}
*e = lnwire.NewShortChanIDFromInt(a)
case *lnwire.ChannelID:
if _, err := io.ReadFull(r, e[:]); err != nil {
return err
}
case *int64, *uint64:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *uint32:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *int32:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *uint16:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *uint8:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *bool:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *btcutil.Amount:
var a uint64
if err := binary.Read(r, byteOrder, &a); err != nil {
return err
}
*e = btcutil.Amount(a)
case *lnwire.MilliSatoshi:
var a uint64
if err := binary.Read(r, byteOrder, &a); err != nil {
return err
}
*e = lnwire.MilliSatoshi(a)
case **btcec.PrivateKey:
var b [btcec.PrivKeyBytesLen]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
return err
}
priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:])
*e = priv
case **btcec.PublicKey:
var b [btcec.PubKeyBytesLenCompressed]byte
if _, err := io.ReadFull(r, b[:]); err != nil {
return err
}
pubKey, err := btcec.ParsePubKey(b[:], btcec.S256())
if err != nil {
return err
}
*e = pubKey
case *shachain.Producer:
var root [32]byte
if _, err := io.ReadFull(r, root[:]); err != nil {
return err
}
// TODO(roasbeef): remove
producer, err := shachain.NewRevocationProducerFromBytes(root[:])
if err != nil {
return err
}
*e = producer
case *shachain.Store:
store, err := shachain.NewRevocationStoreFromBytes(r)
if err != nil {
return err
}
*e = store
case **wire.MsgTx:
tx := wire.NewMsgTx(2)
if err := tx.Deserialize(r); err != nil {
return err
}
*e = tx
case *[32]byte:
if _, err := io.ReadFull(r, e[:]); err != nil {
return err
}
case *[]byte:
bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte")
if err != nil {
return err
}
*e = bytes
case *lnwire.Message:
msg, err := lnwire.ReadMessage(r, 0)
if err != nil {
return err
}
*e = msg
case *ChannelStatus:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *ClosureType:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *lnwire.FundingFlag:
if err := binary.Read(r, byteOrder, e); err != nil {
return err
}
case *net.Addr:
addr, err := deserializeAddr(r)
if err != nil {
return err
}
*e = addr
case *[]net.Addr:
var numAddrs uint32
if err := ReadElement(r, &numAddrs); err != nil {
return err
}
*e = make([]net.Addr, numAddrs)
for i := uint32(0); i < numAddrs; i++ {
addr, err := deserializeAddr(r)
if err != nil {
return err
}
(*e)[i] = addr
}
default:
return UnknownElementType{"ReadElement", e}
}
return nil
}
// ReadElements deserializes a variable number of elements from the passed
// io.Reader, with each element being deserialized according to the ReadElement
// function.
func ReadElements(r io.Reader, elements ...interface{}) error {
for _, element := range elements {
err := ReadElement(r, element)
if err != nil {
return err
}
}
return nil
}
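The codec is positional: ReadElements must receive pointers in the same order, and of the same types, as the values given to WriteElements. A minimal in-package round trip (assumes bytes):

func roundTripElements() error {
	var buf bytes.Buffer
	err := WriteElements(&buf, uint32(42), true, btcutil.Amount(1000))
	if err != nil {
		return err
	}

	var (
		a   uint32
		b   bool
		amt btcutil.Amount
	)
	// Pointers must line up one-to-one with the written values.
	return ReadElements(&buf, &a, &b, &amt)
}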

@@ -0,0 +1,221 @@
package migration_01_to_11
import (
"bytes"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"time"
"github.com/coreos/bbolt"
)
const (
dbName = "channel.db"
dbFilePermission = 0600
)
// migration is a function which takes a prior outdated version of the database
// instance and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx *bbolt.Tx) error
var (
// Big endian is the preferred byte order, due to cursor scans over
// integer keys iterating in order.
byteOrder = binary.BigEndian
)
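A minimal sketch of a function satisfying the migration type; the bucket name is purely illustrative:

func exampleMigration(tx *bbolt.Tx) error {
	// Reshape the schema in place; the caller is expected to bump the
	// stored DB version once this returns nil.
	_, err := tx.CreateBucketIfNotExists([]byte("example-bucket"))
	return err
}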
// DB is the primary datastore for the lnd daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
*bbolt.DB
dbPath string
graph *ChannelGraph
now func() time.Time
}
// Open opens an existing channeldb. Any necessary schema migrations due to
// updates will take place as required.
func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
path := filepath.Join(dbPath, dbName)
if !fileExists(path) {
if err := createChannelDB(dbPath); err != nil {
return nil, err
}
}
opts := DefaultOptions()
for _, modifier := range modifiers {
modifier(&opts)
}
// Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large.
options := &bbolt.Options{
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
if err != nil {
return nil, err
}
chanDB := &DB{
DB: bdb,
dbPath: dbPath,
now: time.Now,
}
chanDB.graph = newChannelGraph(
chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
)
return chanDB, nil
}
// createChannelDB creates and initializes a fresh version of channeldb. In
// the case that the target path has not yet been created or doesn't yet exist,
// then the path is created. Additionally, all required top-level buckets used
// within the database are created.
func createChannelDB(dbPath string) error {
if !fileExists(dbPath) {
if err := os.MkdirAll(dbPath, 0700); err != nil {
return err
}
}
path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil)
if err != nil {
return err
}
err = bdb.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(paymentBucket); err != nil {
return err
}
nodes, err := tx.CreateBucket(nodeBucket)
if err != nil {
return err
}
_, err = nodes.CreateBucket(aliasIndexBucket)
if err != nil {
return err
}
_, err = nodes.CreateBucket(nodeUpdateIndexBucket)
if err != nil {
return err
}
edges, err := tx.CreateBucket(edgeBucket)
if err != nil {
return err
}
if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
return err
}
if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
return err
}
if _, err := edges.CreateBucket(channelPointBucket); err != nil {
return err
}
if _, err := edges.CreateBucket(zombieBucket); err != nil {
return err
}
graphMeta, err := tx.CreateBucket(graphMetaBucket)
if err != nil {
return err
}
_, err = graphMeta.CreateBucket(pruneLogBucket)
if err != nil {
return err
}
if _, err := tx.CreateBucket(metaBucket); err != nil {
return err
}
meta := &Meta{
DbVersionNumber: 0,
}
return putMeta(meta, tx)
})
if err != nil {
return fmt.Errorf("unable to create new channeldb")
}
return bdb.Close()
}
// fileExists returns true if the file exists, and false otherwise.
func fileExists(path string) bool {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// FetchClosedChannels attempts to fetch all closed channels from the database.
// The pendingOnly bool toggles if channels that aren't yet fully closed should
// be returned in the response or not. When a channel was cooperatively closed,
// it becomes fully closed after a single confirmation. When a channel was
// forcibly closed, it will become fully closed after _all_ the pending funds
// (if any) have been swept.
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if closeBucket == nil {
return ErrNoClosedChannels
}
return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
summaryReader := bytes.NewReader(summaryBytes)
chanSummary, err := deserializeCloseChannelSummary(summaryReader)
if err != nil {
return err
}
// If the query specified to only include pending
// channels, then we'll skip any channels which aren't
// currently pending.
if !chanSummary.IsPending && pendingOnly {
return nil
}
chanSummaries = append(chanSummaries, chanSummary)
return nil
})
}); err != nil {
return nil, err
}
return chanSummaries, nil
}
// ChannelGraph returns a new instance of the directed channel graph.
func (d *DB) ChannelGraph() *ChannelGraph {
return d.graph
}
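A usage sketch (hypothetical caller, assumes fmt) listing only the channels still awaiting full on-chain resolution:

func listPendingCloses(dbPath string) error {
	db, err := Open(dbPath)
	if err != nil {
		return err
	}
	defer db.Close()

	// pendingOnly=true filters out channels that are fully resolved.
	pending, err := db.FetchClosedChannels(true)
	if err != nil {
		return err
	}
	for _, summary := range pending {
		fmt.Printf("%v closed (type %v), awaiting sweep\n",
			summary.ChanPoint, summary.CloseType)
	}
	return nil
}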

@@ -0,0 +1,56 @@
package migration_01_to_11
import (
"fmt"
)
var (
// ErrNoInvoicesCreated is returned when we don't have invoices in
// our database to return.
ErrNoInvoicesCreated = fmt.Errorf("there are no existing invoices")
// ErrNoPaymentsCreated is returned when bucket of payments hasn't been
// created.
ErrNoPaymentsCreated = fmt.Errorf("there are no existing payments")
// ErrGraphNotFound is returned when at least one of the components of
// the graph doesn't exist.
ErrGraphNotFound = fmt.Errorf("graph bucket not initialized")
// ErrSourceNodeNotSet is returned if the source node of the graph
// hasn't been added. The source node is the center node within a
// star-graph.
ErrSourceNodeNotSet = fmt.Errorf("source node does not exist")
// ErrGraphNodeNotFound is returned when we're unable to find the target
// node.
ErrGraphNodeNotFound = fmt.Errorf("unable to find node")
// ErrEdgeNotFound is returned when an edge for the target chanID
// can't be found.
ErrEdgeNotFound = fmt.Errorf("edge not found")
// ErrUnknownAddressType is returned when a node's addressType is not
// an expected value.
ErrUnknownAddressType = fmt.Errorf("address type cannot be resolved")
// ErrNoClosedChannels is returned when a node is queried for all the
// channels it has closed, but it hasn't yet closed any channels.
ErrNoClosedChannels = fmt.Errorf("no channels have been closed yet")
// ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel
// policy field is not found in the db even though its message flags
// indicate it should be.
ErrEdgePolicyOptionalFieldNotFound = fmt.Errorf("optional field not " +
"present")
)
// ErrTooManyExtraOpaqueBytes creates an error which should be returned if the
// caller attempts to write an announcement message which bears too many extra
// opaque bytes. We limit this value in order to ensure that we don't waste
// disk space due to nodes unnecessarily padding out their announcements with
// garbage data.
func ErrTooManyExtraOpaqueBytes(numBytes int) error {
return fmt.Errorf("max allowed number of opaque bytes is %v, received "+
"%v bytes", MaxAllowedExtraOpaqueBytes, numBytes)
}

@@ -0,0 +1,1179 @@
package migration_01_to_11
import (
"bytes"
"encoding/binary"
"fmt"
"image/color"
"io"
"net"
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/lnwire"
)
var (
// nodeBucket is a bucket which houses all the vertices or nodes within
// the channel graph. This bucket has a single sub-bucket which adds an
// additional index from pubkey -> alias. Within the top-level of this
// bucket, the key space maps a node's compressed public key to the
// serialized information for that node. Additionally, there's a
// special key "source" which stores the pubkey of the source node. The
// source node is used as the starting point for all graph queries and
// traversals. The graph is formed as a star-graph with the source node
// at the center.
//
// maps: pubKey -> nodeInfo
// maps: source -> selfPubKey
nodeBucket = []byte("graph-node")
// nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket
// will be used to quickly look up the "freshness" of a node's last
// update to the network. The bucket only contains keys, and no values;
// its mapping:
//
// maps: updateTime || nodeID -> nil
nodeUpdateIndexBucket = []byte("graph-node-update-index")
// sourceKey is a special key that resides within the nodeBucket. The
// sourceKey maps a key to the public key of the "self node".
sourceKey = []byte("source")
// aliasIndexBucket is a sub-bucket that's nested within the main
// nodeBucket. This bucket maps the public key of a node to its
// current alias. This bucket is provided as it can be used within a
// future UI layer to add an additional degree of confirmation.
aliasIndexBucket = []byte("alias")
// edgeBucket is a bucket which houses all of the edge or channel
// information within the channel graph. This bucket essentially acts
// as an adjacency list, which in conjunction with a range scan, can be
// used to iterate over all the incoming and outgoing edges for a
// particular node. Keys in the bucket use a prefix scheme which leads
// with the node's public key and ends with the compact edge ID.
// For each chanID, there will be two entries within the bucket, as the
// graph is directed: nodes may have different policies w.r.t. fees
// for their respective directions.
//
// maps: pubKey || chanID -> channel edge policy for node
edgeBucket = []byte("graph-edge")
// unknownPolicy is represented as an empty slice. It is
// used as the value in edgeBucket for unknown channel edge policies.
// Unknown policies are still stored in the database to enable efficient
// lookup of incoming channel edges.
unknownPolicy = []byte{}
// edgeIndexBucket is an index which can be used to iterate all edges
// in the bucket, grouping them according to their in/out nodes.
// Additionally, the items in this bucket also contain the complete
// edge information for a channel. The edge information includes the
// capacity of the channel, the nodes that made the channel, etc. This
// bucket resides within the edgeBucket above. Creation of an edge
// proceeds in two phases: first the edge is added to the edge index,
// afterwards the edgeBucket can be updated with the latest details of
// the edge as they are announced on the network.
//
// maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo
edgeIndexBucket = []byte("edge-index")
// edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This
// bucket contains an index which allows us to gauge the "freshness" of
// a channel's last updates.
//
// maps: updateTime || chanID -> nil
edgeUpdateIndexBucket = []byte("edge-update-index")
// channelPointBucket maps a channel's full outpoint (txid:index) to
// its short 8-byte channel ID. This bucket resides within the
// edgeBucket above, and can be used to quickly remove an edge due to
// the outpoint being spent, or to query for existence of a channel.
//
// maps: outPoint -> chanID
channelPointBucket = []byte("chan-index")
// zombieBucket is a sub-bucket of the main edgeBucket bucket
// responsible for maintaining an index of zombie channels. Each entry
// exists within the bucket as follows:
//
// maps: chanID -> pubKey1 || pubKey2
//
// The chanID represents the channel ID of the edge that is marked as a
// zombie and is used as the key, which maps to the public keys of the
// edge's participants.
zombieBucket = []byte("zombie-index")
// disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket
// responsible for maintaining an index of disabled edge policies. Each
// entry exists within the bucket as follows:
//
// maps: <chanID><direction> -> []byte{}
//
// The chanID represents the channel ID of the edge and the direction is
// one byte representing the direction of the edge. The main purpose of
// this index is to allow pruning disabled channels in a fast way without
// the need to iterate all over the graph.
disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
// graphMetaBucket is a top-level bucket which stores various metadata
// related to the on-disk channel graph. Data stored in this bucket
// includes the block to which the graph has been synced, the total
// number of channels, etc.
graphMetaBucket = []byte("graph-meta")
// pruneLogBucket is a bucket within the graphMetaBucket that stores
// a mapping from the block height to the hash for the blocks used to
// prune the graph.
// Once a new block is discovered, any channels that have been closed
// (by spending the outpoint) can safely be removed from the graph, and
// the block is added to the prune log. We need to keep such a log for
// the case where a reorg happens, and we must "rewind" the state of the
// graph by removing channels that were previously confirmed. In such a
// case we'll remove all entries from the prune log with a block height
// that no longer exists.
pruneLogBucket = []byte("prune-log")
)
const (
// MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
// we'll permit to be written to disk. We limit this as otherwise, it
// would be possible for a node to create a ton of updates and slowly
// fill our disk, and also waste bandwidth due to relaying.
MaxAllowedExtraOpaqueBytes = 10000
)
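// A sketch of how the prefix scheme above is consumed; forEachNodeChannel is
// a hypothetical helper and assumes nodePub is a 33-byte compressed public
// key. Because edge keys are pubKey (33 bytes) || chanID (8 bytes), a cursor
// seek on the pubkey prefix visits exactly that node's edges.
func forEachNodeChannel(tx *bbolt.Tx, nodePub []byte) error {
	edges := tx.Bucket(edgeBucket)
	if edges == nil {
		return ErrGraphNotFound
	}
	c := edges.Cursor()
	for k, _ := c.Seek(nodePub); k != nil && bytes.HasPrefix(k, nodePub); k, _ = c.Next() {
		// The trailing 8 bytes of the key hold the channel ID.
		log.Tracef("node %x: channel %v", nodePub, byteOrder.Uint64(k[33:]))
	}
	return nil
}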
// ChannelGraph is a persistent, on-disk graph representation of the Lightning
// Network. This struct can be used to implement path finding algorithms on top
// of, and also to update a node's view based on information received from the
// p2p network. Internally, the graph is stored using a modified adjacency list
// representation with some added object interaction possible with each
// serialized edge/node. The stored graph is directed, meaning that there are two
// edges stored for each channel: an inbound/outbound edge for each node pair.
// Nodes, edges, and edge information can all be added to the graph
// independently. Edge removal results in the deletion of all edge information
// for that edge.
type ChannelGraph struct {
db *DB
}
// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The
// rejectCacheSize and chanCacheSize parameters are unused here.
func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
return &ChannelGraph{
db: db,
}
}
// SourceNode returns the source node of the graph. The source node is treated
// as the center node within a star-graph. This method may be used to kick off
// a path finding algorithm in order to explore the reachability of another
// node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
var source *LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
node, err := c.sourceNode(nodes)
if err != nil {
return err
}
source = node
return nil
})
if err != nil {
return nil, err
}
return source, nil
}
// sourceNode uses an existing database transaction and returns the source node
// of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
selfPub := nodes.Get(sourceKey)
if selfPub == nil {
return nil, ErrSourceNodeNotSet
}
// With the pubKey of the source node retrieved, we're able to
// fetch the full node information.
node, err := fetchLightningNode(nodes, selfPub)
if err != nil {
return nil, err
}
node.db = c.db
return &node, nil
}
// SetSourceNode sets the source node within the graph database. The source
// node is to be used as the center of a star-graph within path finding
// algorithms.
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
nodePubBytes := node.PubKeyBytes[:]
return c.db.Update(func(tx *bbolt.Tx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
if err != nil {
return err
}
// Next we create the mapping from source to the targeted
// public key.
if err := nodes.Put(sourceKey, nodePubBytes); err != nil {
return err
}
// Finally, we commit the information of the lightning node
// itself.
return addLightningNode(tx, node)
})
}
func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
if err != nil {
return err
}
aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket)
if err != nil {
return err
}
updateIndex, err := nodes.CreateBucketIfNotExists(
nodeUpdateIndexBucket,
)
if err != nil {
return err
}
return putLightningNode(nodes, aliases, updateIndex, node)
}
// updateEdgePolicy attempts to update an edge's policy within the relevant
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.Bucket(edgeBucket)
if edges == nil {
return false, ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
if edgeIndex == nil {
return false, ErrEdgeNotFound
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
if err != nil {
return false, err
}
// Create the channelID key by converting the channel ID
// integer into a byte slice.
var chanID [8]byte
byteOrder.PutUint64(chanID[:], edge.ChannelID)
// With the channel ID, we then fetch the value storing the two
// nodes which connect this channel edge.
nodeInfo := edgeIndex.Get(chanID[:])
if nodeInfo == nil {
return false, ErrEdgeNotFound
}
// Depending on the flags value passed above, either the first
// or second edge policy is being updated.
var fromNode, toNode []byte
var isUpdate1 bool
if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
fromNode = nodeInfo[:33]
toNode = nodeInfo[33:66]
isUpdate1 = true
} else {
fromNode = nodeInfo[33:66]
toNode = nodeInfo[:33]
isUpdate1 = false
}
// Finally, with the direction of the edge being updated
// identified, we update the on-disk edge representation.
err = putChanEdgePolicy(edges, nodes, edge, fromNode, toNode)
if err != nil {
return false, err
}
return isUpdate1, nil
}
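// For reference, the nodeInfo value consulted above comes from
// edgeIndexBucket and is laid out as:
//
//	pubKey1 (33 bytes) || pubKey2 (33 bytes) || rest of the edge info
//
// which is why the slices [:33] and [33:66] identify the two endpoints of
// the channel.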
// LightningNode represents an individual vertex/node within the channel graph.
// A node is connected to other nodes by one or more channel edges emanating
// from it. As the graph is directed, a node will also have an incoming edge
// attached to it for each outgoing edge.
type LightningNode struct {
// PubKeyBytes is the raw bytes of the public key of the target node.
PubKeyBytes [33]byte
pubKey *btcec.PublicKey
// HaveNodeAnnouncement indicates whether we received a node
// announcement for this particular node. If true, the remaining fields
// will be set, if false only the PubKey is known for this node.
HaveNodeAnnouncement bool
// LastUpdate is the last time the vertex information for this node has
// been updated.
LastUpdate time.Time
// Addresses is the list of addresses this node is reachable over.
Addresses []net.Addr
// Color is the selected color for the node.
Color color.RGBA
// Alias is a nick-name for the node. The alias can be used to confirm
// a node's identity or to serve as a short ID for an address book.
Alias string
// AuthSigBytes is the raw signature under the advertised public key
// which serves to authenticate the attributes announced by this node.
AuthSigBytes []byte
// Features is the list of protocol features supported by this node.
Features *lnwire.FeatureVector
// ExtraOpaqueData is the set of data that was appended to this
// message, some of which we may not actually know how to iterate or
// parse. By holding onto this data, we ensure that we're able to
// properly validate the set of signatures that cover these new fields,
// and ensure we're able to make upgrades to the network in a forwards
// compatible manner.
ExtraOpaqueData []byte
db *DB
// TODO(roasbeef): discovery will need storage to keep its last IP
// address and re-announce if interface changes?
// TODO(roasbeef): add update method and fetch?
}
// PubKey is the node's long-term identity public key. This key will be used to
// authenticate any advertisements/updates sent by the node.
//
// NOTE: By having this method to access an attribute, we ensure we only need
// to fully deserialize the pubkey if absolutely necessary.
func (l *LightningNode) PubKey() (*btcec.PublicKey, error) {
if l.pubKey != nil {
return l.pubKey, nil
}
key, err := btcec.ParsePubKey(l.PubKeyBytes[:], btcec.S256())
if err != nil {
return nil, err
}
l.pubKey = key
return key, nil
}
// ChannelEdgeInfo represents a fully authenticated channel along with all its
// unique attributes. Once an authenticated channel announcement has been
// processed on the network, then an instance of ChannelEdgeInfo encapsulating
// the channel's attributes is stored. The other portions relevant to routing
// policy of a channel are stored within a ChannelEdgePolicy for each direction
// of the channel.
type ChannelEdgeInfo struct {
// ChannelID is the unique channel ID for the channel. The first 3
// bytes are the block height, the next 3 the index within the block,
// and the last 2 bytes are the output index for the channel.
ChannelID uint64
// ChainHash is the hash that uniquely identifies the chain that this
// channel was opened within.
//
// TODO(roasbeef): need to modify db keying for multi-chain
// * must add chain hash to prefix as well
ChainHash chainhash.Hash
// NodeKey1Bytes is the raw public key of the first node.
NodeKey1Bytes [33]byte
// NodeKey2Bytes is the raw public key of the second node.
NodeKey2Bytes [33]byte
// BitcoinKey1Bytes is the raw bitcoin public key of the first node.
BitcoinKey1Bytes [33]byte
// BitcoinKey2Bytes is the raw bitcoin public key of the second node.
BitcoinKey2Bytes [33]byte
// Features is an opaque byte slice that encodes the set of channel
// specific features that this channel edge supports.
Features []byte
// AuthProof is the authentication proof for this channel. This proof
// contains a set of signatures binding four identities, which attests
// to the legitimacy of the advertised channel.
AuthProof *ChannelAuthProof
// ChannelPoint is the funding outpoint of the channel. This can be
// used to uniquely identify the channel within the channel graph.
ChannelPoint wire.OutPoint
// Capacity is the total capacity of the channel, this is determined by
// the value output in the outpoint that created this channel.
Capacity btcutil.Amount
// ExtraOpaqueData is the set of data that was appended to this
// message, some of which we may not actually know how to iterate or
// parse. By holding onto this data, we ensure that we're able to
// properly validate the set of signatures that cover these new fields,
// and ensure we're able to make upgrades to the network in a forwards
// compatible manner.
ExtraOpaqueData []byte
}
// ChannelAuthProof is the authentication proof (the signature portion) for a
// channel. Using the four signatures contained in the struct, and some
// auxiliary knowledge (the funding script, node identities, and outpoint) nodes
// on the network are able to validate the authenticity and existence of a
// channel. Each of these signatures signs the following digest: chanID ||
// nodeID1 || nodeID2 || bitcoinKey1 || bitcoinKey2 || 2-byte-feature-len ||
// features.
type ChannelAuthProof struct {
// NodeSig1Bytes are the raw bytes of the first node signature encoded
// in DER format.
NodeSig1Bytes []byte
// NodeSig2Bytes are the raw bytes of the second node signature
// encoded in DER format.
NodeSig2Bytes []byte
// BitcoinSig1Bytes are the raw bytes of the first bitcoin signature
// encoded in DER format.
BitcoinSig1Bytes []byte
// BitcoinSig2Bytes are the raw bytes of the second bitcoin signature
// encoded in DER format.
BitcoinSig2Bytes []byte
}
// IsEmpty checks if the authentication proof is empty. The proof is empty if
// at least one of the signatures is empty.
func (c *ChannelAuthProof) IsEmpty() bool {
return len(c.NodeSig1Bytes) == 0 ||
len(c.NodeSig2Bytes) == 0 ||
len(c.BitcoinSig1Bytes) == 0 ||
len(c.BitcoinSig2Bytes) == 0
}
// ChannelEdgePolicy represents a *directed* edge within the channel graph. For
// each channel in the database, there are two distinct edges: one for each
// possible direction of travel along the channel. The edges themselves hold
// information concerning fees, and minimum time-lock information which is
// utilized during path finding.
type ChannelEdgePolicy struct {
// SigBytes is the raw bytes of the signature of the channel edge
// policy. We'll only parse these if the caller needs to access the
// signature for validation purposes. Do not set SigBytes directly, but
// use SetSigBytes instead to make sure that the cache is invalidated.
SigBytes []byte
// ChannelID is the unique channel ID for the channel. The first 3
// bytes are the block height, the next 3 the index within the block,
// and the last 2 bytes are the output index for the channel.
ChannelID uint64
// LastUpdate is the last time an authenticated edge for this channel
// was received.
LastUpdate time.Time
// MessageFlags is a bitfield which indicates the presence of optional
// fields (like max_htlc) in the policy.
MessageFlags lnwire.ChanUpdateMsgFlags
// ChannelFlags is a bitfield which signals the capabilities of the
// channel as well as the directed edge this update applies to.
ChannelFlags lnwire.ChanUpdateChanFlags
// TimeLockDelta is the number of blocks this node will subtract from
// the expiry of an incoming HTLC. This value expresses the time buffer
// the node would like to apply to HTLC exchanges.
TimeLockDelta uint16
// MinHTLC is the smallest value HTLC this node will accept, expressed
// in millisatoshi.
MinHTLC lnwire.MilliSatoshi
// MaxHTLC is the largest value HTLC this node will accept, expressed
// in millisatoshi.
MaxHTLC lnwire.MilliSatoshi
// FeeBaseMSat is the base HTLC fee that will be charged for forwarding
// ANY HTLC, expressed in mSAT's.
FeeBaseMSat lnwire.MilliSatoshi
// FeeProportionalMillionths is the rate that the node will charge for
// HTLCs for each millionth of a satoshi forwarded.
FeeProportionalMillionths lnwire.MilliSatoshi
// Node is the LightningNode that this directed edge leads to. Using
// this pointer the channel graph can further be traversed.
Node *LightningNode
// ExtraOpaqueData is the set of data that was appended to this
// message, some of which we may not actually know how to iterate or
// parse. By holding onto this data, we ensure that we're able to
// properly validate the set of signatures that cover these new fields,
// and ensure we're able to make upgrades to the network in a forwards
// compatible manner.
ExtraOpaqueData []byte
}
// IsDisabled determines whether the edge has the disabled bit set.
func (c *ChannelEdgePolicy) IsDisabled() bool {
return c.ChannelFlags&lnwire.ChanUpdateDisabled ==
lnwire.ChanUpdateDisabled
}
func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
updateIndex *bbolt.Bucket, node *LightningNode) error {
var (
scratch [16]byte
b bytes.Buffer
)
pub, err := node.PubKey()
if err != nil {
return err
}
nodePub := pub.SerializeCompressed()
// If the node has the update time set, write it, else write 0.
updateUnix := uint64(0)
if node.LastUpdate.Unix() > 0 {
updateUnix = uint64(node.LastUpdate.Unix())
}
byteOrder.PutUint64(scratch[:8], updateUnix)
if _, err := b.Write(scratch[:8]); err != nil {
return err
}
if _, err := b.Write(nodePub); err != nil {
return err
}
// If we got a node announcement for this node, we will have the rest
// of the data available. If not we don't have more data to write.
if !node.HaveNodeAnnouncement {
// Write HaveNodeAnnouncement=0.
byteOrder.PutUint16(scratch[:2], 0)
if _, err := b.Write(scratch[:2]); err != nil {
return err
}
return nodeBucket.Put(nodePub, b.Bytes())
}
// Write HaveNodeAnnouncement=1.
byteOrder.PutUint16(scratch[:2], 1)
if _, err := b.Write(scratch[:2]); err != nil {
return err
}
if err := binary.Write(&b, byteOrder, node.Color.R); err != nil {
return err
}
if err := binary.Write(&b, byteOrder, node.Color.G); err != nil {
return err
}
if err := binary.Write(&b, byteOrder, node.Color.B); err != nil {
return err
}
if err := wire.WriteVarString(&b, 0, node.Alias); err != nil {
return err
}
if err := node.Features.Encode(&b); err != nil {
return err
}
numAddresses := uint16(len(node.Addresses))
byteOrder.PutUint16(scratch[:2], numAddresses)
if _, err := b.Write(scratch[:2]); err != nil {
return err
}
for _, address := range node.Addresses {
if err := serializeAddr(&b, address); err != nil {
return err
}
}
sigLen := len(node.AuthSigBytes)
if sigLen > 80 {
return fmt.Errorf("max sig len allowed is 80, had %v",
sigLen)
}
err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes)
if err != nil {
return err
}
if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData))
}
err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData)
if err != nil {
return err
}
if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil {
return err
}
// With the alias bucket updated, we'll now update the index that
// tracks the time series of node updates.
var indexKey [8 + 33]byte
byteOrder.PutUint64(indexKey[:8], updateUnix)
copy(indexKey[8:], nodePub)
// If there was already an old index entry for this node, then we'll
// delete the old one before we write the new entry.
if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil {
// Extract out the old update time so we can reconstruct the
// prior index key to delete it from the index.
oldUpdateTime := nodeBytes[:8]
var oldIndexKey [8 + 33]byte
copy(oldIndexKey[:8], oldUpdateTime)
copy(oldIndexKey[8:], nodePub)
if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
return err
}
}
if err := updateIndex.Put(indexKey[:], nil); err != nil {
return err
}
return nodeBucket.Put(nodePub, b.Bytes())
}
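// The node record written above is laid out as:
//
//	updateUnix (8) || pubKey (33) || haveAnn (2) ||
//	[ color R/G/B (3) || alias (varstring) || features ||
//	  numAddresses (2) || addresses || sig (varbytes) ||
//	  extraOpaque (varbytes) ]
//
// where the bracketed fields are only present when haveAnn is 1.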
func fetchLightningNode(nodeBucket *bbolt.Bucket,
nodePub []byte) (LightningNode, error) {
nodeBytes := nodeBucket.Get(nodePub)
if nodeBytes == nil {
return LightningNode{}, ErrGraphNodeNotFound
}
nodeReader := bytes.NewReader(nodeBytes)
return deserializeLightningNode(nodeReader)
}
func deserializeLightningNode(r io.Reader) (LightningNode, error) {
var (
node LightningNode
scratch [8]byte
err error
)
if _, err := r.Read(scratch[:]); err != nil {
return LightningNode{}, err
}
unix := int64(byteOrder.Uint64(scratch[:]))
node.LastUpdate = time.Unix(unix, 0)
if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil {
return LightningNode{}, err
}
if _, err := r.Read(scratch[:2]); err != nil {
return LightningNode{}, err
}
hasNodeAnn := byteOrder.Uint16(scratch[:2])
if hasNodeAnn == 1 {
node.HaveNodeAnnouncement = true
} else {
node.HaveNodeAnnouncement = false
}
// The rest of the data is optional, and will only be there if we got a node
// announcement for this node.
if !node.HaveNodeAnnouncement {
return node, nil
}
// We did get a node announcement for this node, so we'll have the rest
// of the data available.
if err := binary.Read(r, byteOrder, &node.Color.R); err != nil {
return LightningNode{}, err
}
if err := binary.Read(r, byteOrder, &node.Color.G); err != nil {
return LightningNode{}, err
}
if err := binary.Read(r, byteOrder, &node.Color.B); err != nil {
return LightningNode{}, err
}
node.Alias, err = wire.ReadVarString(r, 0)
if err != nil {
return LightningNode{}, err
}
fv := lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures)
err = fv.Decode(r)
if err != nil {
return LightningNode{}, err
}
node.Features = fv
if _, err := r.Read(scratch[:2]); err != nil {
return LightningNode{}, err
}
numAddresses := int(byteOrder.Uint16(scratch[:2]))
var addresses []net.Addr
for i := 0; i < numAddresses; i++ {
address, err := deserializeAddr(r)
if err != nil {
return LightningNode{}, err
}
addresses = append(addresses, address)
}
node.Addresses = addresses
node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
if err != nil {
return LightningNode{}, err
}
// We'll try and see if there are any opaque bytes left, if not, then
// we'll ignore the EOF error and return the node as is.
node.ExtraOpaqueData, err = wire.ReadVarBytes(
r, 0, MaxAllowedExtraOpaqueBytes, "blob",
)
switch {
case err == io.ErrUnexpectedEOF:
case err == io.EOF:
case err != nil:
return LightningNode{}, err
}
return node, nil
}
func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
var (
err error
edgeInfo ChannelEdgeInfo
)
if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil {
return ChannelEdgeInfo{}, err
}
if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil {
return ChannelEdgeInfo{}, err
}
if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil {
return ChannelEdgeInfo{}, err
}
if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil {
return ChannelEdgeInfo{}, err
}
edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features")
if err != nil {
return ChannelEdgeInfo{}, err
}
proof := &ChannelAuthProof{}
proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
if err != nil {
return ChannelEdgeInfo{}, err
}
proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
if err != nil {
return ChannelEdgeInfo{}, err
}
proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
if err != nil {
return ChannelEdgeInfo{}, err
}
proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs")
if err != nil {
return ChannelEdgeInfo{}, err
}
if !proof.IsEmpty() {
edgeInfo.AuthProof = proof
}
edgeInfo.ChannelPoint = wire.OutPoint{}
if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil {
return ChannelEdgeInfo{}, err
}
if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil {
return ChannelEdgeInfo{}, err
}
if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil {
return ChannelEdgeInfo{}, err
}
if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil {
return ChannelEdgeInfo{}, err
}
// We'll try and see if there are any opaque bytes left, if not, then
// we'll ignore the EOF error and return the edge as is.
edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes(
r, 0, MaxAllowedExtraOpaqueBytes, "blob",
)
switch {
case err == io.ErrUnexpectedEOF:
case err == io.EOF:
case err != nil:
return ChannelEdgeInfo{}, err
}
return edgeInfo, nil
}
func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
from, to []byte) error {
var edgeKey [33 + 8]byte
copy(edgeKey[:], from)
byteOrder.PutUint64(edgeKey[33:], edge.ChannelID)
var b bytes.Buffer
if err := serializeChanEdgePolicy(&b, edge, to); err != nil {
return err
}
// Before we write out the new edge, we'll create a new entry in the
// update index in order to keep it fresh.
updateUnix := uint64(edge.LastUpdate.Unix())
var indexKey [8 + 8]byte
byteOrder.PutUint64(indexKey[:8], updateUnix)
byteOrder.PutUint64(indexKey[8:], edge.ChannelID)
updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket)
if err != nil {
return err
}
// If there was already an entry for this edge, then we'll need to
// delete the old one to ensure we don't leave around any after-images.
// An unknown policy value does not have an update time recorded, so
// it also does not need to be removed.
if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil &&
!bytes.Equal(edgeBytes[:], unknownPolicy) {
// In order to delete the old entry, we'll need to obtain the
// *prior* update time in order to delete it. To do this, we'll
// need to deserialize the existing policy within the database
// (now outdated by the new one), and delete its corresponding
// entry within the update index. We'll ignore any
// ErrEdgePolicyOptionalFieldNotFound error, as we only need
// the channel ID and update time to delete the entry.
// TODO(halseth): get rid of these invalid policies in a
// migration.
oldEdgePolicy, err := deserializeChanEdgePolicy(
bytes.NewReader(edgeBytes), nodes,
)
if err != nil && err != ErrEdgePolicyOptionalFieldNotFound {
return err
}
oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix())
var oldIndexKey [8 + 8]byte
byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime)
byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID)
if err := updateIndex.Delete(oldIndexKey[:]); err != nil {
return err
}
}
if err := updateIndex.Put(indexKey[:], nil); err != nil {
return err
}
updateEdgePolicyDisabledIndex(
edges, edge.ChannelID,
edge.ChannelFlags&lnwire.ChanUpdateDirection > 0,
edge.IsDisabled(),
)
return edges.Put(edgeKey[:], b.Bytes()[:])
}
// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex
// bucket by either adding a new disabled ChannelEdgePolicy or removing an existing
// one.
// The direction represents the direction of the edge and disabled is used for
// deciding whether to remove or add an entry to the bucket.
// In general a channel is disabled if two entries for the same chanID exist
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when pruning is needed.
func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
direction bool, disabled bool) error {
var disabledEdgeKey [8 + 1]byte
byteOrder.PutUint64(disabledEdgeKey[0:], chanID)
if direction {
disabledEdgeKey[8] = 1
}
disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists(
disabledEdgePolicyBucket,
)
if err != nil {
return err
}
if disabled {
return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{})
}
return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:])
}
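// A sketch of the fast retrieval this index enables; disabledChanIDs is a
// hypothetical helper. Note that a channel ID may appear once per disabled
// direction.
func disabledChanIDs(tx *bbolt.Tx) ([]uint64, error) {
	edges := tx.Bucket(edgeBucket)
	if edges == nil {
		return nil, ErrGraphNotFound
	}
	index := edges.Bucket(disabledEdgePolicyBucket)
	if index == nil {
		return nil, nil
	}
	var ids []uint64
	err := index.ForEach(func(k, _ []byte) error {
		// Keys are chanID (8 bytes) || direction (1 byte).
		ids = append(ids, byteOrder.Uint64(k[:8]))
		return nil
	})
	return ids, err
}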
// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
from []byte) error {
var edgeKey [33 + 8]byte
copy(edgeKey[:], from)
byteOrder.PutUint64(edgeKey[33:], channelID)
if edges.Get(edgeKey[:]) != nil {
return fmt.Errorf("Cannot write unknown policy for channel %v "+
" when there is already a policy present", channelID)
}
return edges.Put(edgeKey[:], unknownPolicy)
}
func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
var edgeKey [33 + 8]byte
copy(edgeKey[:], nodePub)
copy(edgeKey[33:], chanID[:])
edgeBytes := edges.Get(edgeKey[:])
if edgeBytes == nil {
return nil, ErrEdgeNotFound
}
// No need to deserialize unknown policy.
if bytes.Equal(edgeBytes[:], unknownPolicy) {
return nil, nil
}
edgeReader := bytes.NewReader(edgeBytes)
ep, err := deserializeChanEdgePolicy(edgeReader, nodes)
switch {
// If the db policy was missing an expected optional field, we return
// nil as if the policy was unknown.
case err == ErrEdgePolicyOptionalFieldNotFound:
return nil, nil
case err != nil:
return nil, err
}
return ep, nil
}
func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
to []byte) error {
err := wire.WriteVarBytes(w, 0, edge.SigBytes)
if err != nil {
return err
}
if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil {
return err
}
var scratch [8]byte
updateUnix := uint64(edge.LastUpdate.Unix())
byteOrder.PutUint64(scratch[:], updateUnix)
if _, err := w.Write(scratch[:]); err != nil {
return err
}
if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil {
return err
}
if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil {
return err
}
if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil {
return err
}
if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil {
return err
}
if err := binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil {
return err
}
if err := binary.Write(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil {
return err
}
if _, err := w.Write(to); err != nil {
return err
}
// If the max_htlc field is present, we write it. To be compatible with
// older versions that weren't aware of this field, we write it as part
// of the opaque data.
// TODO(halseth): clean up when moving to TLV.
var opaqueBuf bytes.Buffer
if edge.MessageFlags.HasMaxHtlc() {
err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC))
if err != nil {
return err
}
}
if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes {
return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData))
}
if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil {
return err
}
if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil {
return err
}
return nil
}
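// The resulting on-disk layout of a policy, as written above, is:
//
//	sig (varbytes) || chanID (8) || lastUpdate (8) || msgFlags (1) ||
//	chanFlags (1) || timeLockDelta (2) || minHTLC (8) || feeBase (8) ||
//	feeRate (8) || toNode (33) || opaque (varbytes)
//
// where opaque starts with maxHTLC (8) if and only if msgFlags has the
// max_htlc bit set, followed by any extra opaque data.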
func deserializeChanEdgePolicy(r io.Reader,
nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
edge := &ChannelEdgePolicy{}
var err error
edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig")
if err != nil {
return nil, err
}
if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil {
return nil, err
}
var scratch [8]byte
if _, err := r.Read(scratch[:]); err != nil {
return nil, err
}
unix := int64(byteOrder.Uint64(scratch[:]))
edge.LastUpdate = time.Unix(unix, 0)
if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil {
return nil, err
}
if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil {
return nil, err
}
if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil {
return nil, err
}
var n uint64
if err := binary.Read(r, byteOrder, &n); err != nil {
return nil, err
}
edge.MinHTLC = lnwire.MilliSatoshi(n)
if err := binary.Read(r, byteOrder, &n); err != nil {
return nil, err
}
edge.FeeBaseMSat = lnwire.MilliSatoshi(n)
if err := binary.Read(r, byteOrder, &n); err != nil {
return nil, err
}
edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n)
var pub [33]byte
if _, err := r.Read(pub[:]); err != nil {
return nil, err
}
node, err := fetchLightningNode(nodes, pub[:])
if err != nil {
return nil, fmt.Errorf("unable to fetch node: %x, %v",
pub[:], err)
}
edge.Node = &node
// We'll try and see if there are any opaque bytes left, if not, then
// we'll ignore the EOF error and return the edge as is.
edge.ExtraOpaqueData, err = wire.ReadVarBytes(
r, 0, MaxAllowedExtraOpaqueBytes, "blob",
)
switch {
case err == io.ErrUnexpectedEOF:
case err == io.EOF:
case err != nil:
return nil, err
}
// See if optional fields are present.
if edge.MessageFlags.HasMaxHtlc() {
// The max_htlc field should be at the beginning of the opaque
// bytes.
opq := edge.ExtraOpaqueData
// If the max_htlc field is not present, it might be old data
// stored before this field was validated. We'll return the
// edge along with an error.
if len(opq) < 8 {
return edge, ErrEdgePolicyOptionalFieldNotFound
}
maxHtlc := byteOrder.Uint64(opq[:8])
edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc)
// Exclude the parsed field from the rest of the opaque data.
edge.ExtraOpaqueData = opq[8:]
}
return edge, nil
}

@ -0,0 +1,57 @@
package migration_01_to_11
import (
"image/color"
"math/big"
prand "math/rand"
"net"
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/lightningnetwork/lnd/lnwire"
)
var (
testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}),
Port: 9000}
anotherAddr, _ = net.ResolveTCPAddr("tcp",
"[2001:db8:85a3:0:0:8a2e:370:7334]:80")
testAddrs = []net.Addr{testAddr, anotherAddr}
testSig = &btcec.Signature{
R: new(big.Int),
S: new(big.Int),
}
_, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10)
_, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10)
testFeatures = lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures)
)
func createLightningNode(db *DB, priv *btcec.PrivateKey) (*LightningNode, error) {
updateTime := prand.Int63()
pub := priv.PubKey().SerializeCompressed()
n := &LightningNode{
HaveNodeAnnouncement: true,
AuthSigBytes: testSig.Serialize(),
LastUpdate: time.Unix(updateTime, 0),
Color: color.RGBA{1, 2, 3, 0},
Alias: "kek" + string(pub[:]),
Features: testFeatures,
Addresses: testAddrs,
db: db,
}
copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed())
return n, nil
}
func createTestVertex(db *DB) (*LightningNode, error) {
priv, err := btcec.NewPrivateKey(btcec.S256())
if err != nil {
return nil, err
}
return createLightningNode(db, priv)
}

@ -0,0 +1,550 @@
package migration_01_to_11
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv"
)
var (
// invoiceBucket is the name of the bucket within the database that
// stores all data related to invoices no matter their final state.
// Within the invoice bucket, each invoice is keyed by its invoice ID
// which is a monotonically increasing uint32.
invoiceBucket = []byte("invoices")
// addIndexBucket is an index bucket that we'll use to create a
// monotonically increasing set of add indexes. Each time we add a new
// invoice, this sequence number will be incremented and then populated
// within the new invoice.
//
// In addition to this sequence number, we map:
//
// addIndexNo => invoiceKey
addIndexBucket = []byte("invoice-add-index")
// settleIndexBucket is an index bucket that we'll use to create a
// monotonically increasing integer for tracking a "settle index". Each
// time an invoice is settled, this sequence number will be incremented
// and populated within the newly settled invoice.
//
// In addition to this sequence number, we map:
//
// settleIndexNo => invoiceKey
settleIndexBucket = []byte("invoice-settle-index")
)
const (
// MaxMemoSize is maximum size of the memo field within invoices stored
// in the database.
MaxMemoSize = 1024
// MaxReceiptSize is the maximum size of the payment receipt stored
// within the database alongside incoming/outgoing invoices.
MaxReceiptSize = 1024
// MaxPaymentRequestSize is the max size of a payment request for
// this invoice.
// TODO(halseth): determine the max length payment request when field
// lengths are final.
MaxPaymentRequestSize = 4096
// A set of tlv type definitions used to serialize invoice htlcs to the
// database.
chanIDType tlv.Type = 1
htlcIDType tlv.Type = 3
amtType tlv.Type = 5
acceptHeightType tlv.Type = 7
acceptTimeType tlv.Type = 9
resolveTimeType tlv.Type = 11
expiryHeightType tlv.Type = 13
stateType tlv.Type = 15
)
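// Note that the record types above are all odd, consistent with the tlv
// "it's OK to be odd" convention: a reader that doesn't recognize an odd
// record can skip it rather than fail.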
// ContractState describes the state the invoice is in.
type ContractState uint8
const (
// ContractOpen means the invoice has only been created.
ContractOpen ContractState = 0
// ContractSettled means the htlc is settled and the invoice has been
// paid.
ContractSettled ContractState = 1
// ContractCanceled means the invoice has been canceled.
ContractCanceled ContractState = 2
// ContractAccepted means the HTLC has been accepted but not settled
// yet.
ContractAccepted ContractState = 3
)
// String returns a human readable identifier for the ContractState type.
func (c ContractState) String() string {
switch c {
case ContractOpen:
return "Open"
case ContractSettled:
return "Settled"
case ContractCanceled:
return "Canceled"
case ContractAccepted:
return "Accepted"
}
return "Unknown"
}
// ContractTerm is a companion struct to the Invoice struct. This struct houses
// the necessary conditions required before the invoice can be considered fully
// settled by the payee.
type ContractTerm struct {
// PaymentPreimage is the preimage which is to be revealed in the
// occasion that an HTLC paying to the hash of this preimage is
// extended.
PaymentPreimage lntypes.Preimage
// Value is the expected amount of milli-satoshis to be paid to an HTLC
// which can be satisfied by the above preimage.
Value lnwire.MilliSatoshi
// State describes the state the invoice is in.
State ContractState
}
// Invoice is a payment invoice generated by a payee in order to request
// payment for some good or service. The inclusion of invoices within Lightning
// creates a payment workflow for merchants very similar to that of the
// existing financial system within PayPal, etc. Invoices are added to the
// database when a payment is requested, then can be settled manually once the
// payment is received at the upper layer. For record keeping purposes,
// invoices are never deleted from the database, instead a bit is toggled
// denoting the invoice has been fully settled. Within the database, all
// invoices must have a unique payment hash which is generated by taking the
// sha256 of the payment preimage.
type Invoice struct {
// Memo is an optional memo to be stored alongside an invoice. The
// memo may contain further details pertaining to the invoice itself,
// or any other message which fits within the size constraints.
Memo []byte
// Receipt is an optional field dedicated for storing a
// cryptographically binding receipt of payment.
//
// TODO(roasbeef): document scheme.
Receipt []byte
// PaymentRequest is an optional field where a payment request created
// for this invoice can be stored.
PaymentRequest []byte
// FinalCltvDelta is the minimum required number of blocks before htlc
// expiry when the invoice is accepted.
FinalCltvDelta int32
// Expiry defines how long after creation this invoice should expire.
Expiry time.Duration
// CreationDate is the exact time the invoice was created.
CreationDate time.Time
// SettleDate is the exact time the invoice was settled.
SettleDate time.Time
// Terms are the contractual payment terms of the invoice. Once all the
// terms have been satisfied by the payer, then the invoice can be
// considered fully fulfilled.
//
// TODO(roasbeef): later allow for multiple terms to fulfill the final
// invoice: payment fragmentation, etc.
Terms ContractTerm
// AddIndex is an auto-incrementing integer that acts as a
// monotonically increasing sequence number for all invoices created.
// Clients can then use this field as a "checkpoint" of sorts when
// implementing a streaming RPC to notify consumers of instances where
// an invoice has been added before they re-connected.
//
// NOTE: This index starts at 1.
AddIndex uint64
// SettleIndex is an auto-incrementing integer that acts as a
// monotonically increasing sequence number for all settled invoices.
// Clients can then use this field as a "checkpoint" of sorts when
// implementing a streaming RPC to notify consumers of instances where
// an invoice has been settled before they re-connected.
//
// NOTE: This index starts at 1.
SettleIndex uint64
// AmtPaid is the final amount that we ultimately accepted as payment for
// this invoice. We specify this value independently as it's possible
// that the invoice originally didn't specify an amount, or the sender
// overpaid.
AmtPaid lnwire.MilliSatoshi
// Htlcs records all htlcs that paid to this invoice. Some of these
// htlcs may have been marked as canceled.
Htlcs map[CircuitKey]*InvoiceHTLC
}
// HtlcState defines the states an htlc paying to an invoice can be in.
type HtlcState uint8
// InvoiceHTLC contains details about an htlc paying to this invoice.
type InvoiceHTLC struct {
// Amt is the amount that is carried by this htlc.
Amt lnwire.MilliSatoshi
// AcceptHeight is the block height at which the invoice registry
// decided to accept this htlc as a payment to the invoice. At this
// height, the invoice cltv delay must have been met.
AcceptHeight uint32
// AcceptTime is the wall clock time at which the invoice registry
// decided to accept the htlc.
AcceptTime time.Time
// ResolveTime is the wall clock time at which the invoice registry
// decided to settle the htlc.
ResolveTime time.Time
// Expiry is the expiry height of this htlc.
Expiry uint32
// State indicates the state the invoice htlc is currently in. A
// canceled htlc isn't just removed from the invoice htlcs map, because
// we need AcceptHeight to properly cancel the htlc back.
State HtlcState
}
func validateInvoice(i *Invoice) error {
if len(i.Memo) > MaxMemoSize {
return fmt.Errorf("max length a memo is %v, and invoice "+
"of length %v was provided", MaxMemoSize, len(i.Memo))
}
if len(i.Receipt) > MaxReceiptSize {
return fmt.Errorf("max length a receipt is %v, and invoice "+
"of length %v was provided", MaxReceiptSize,
len(i.Receipt))
}
if len(i.PaymentRequest) > MaxPaymentRequestSize {
return fmt.Errorf("max length of payment request is %v, length "+
"provided was %v", MaxPaymentRequestSize,
len(i.PaymentRequest))
}
return nil
}
// FetchAllInvoices returns all invoices currently stored within the database.
// If the pendingOnly param is true, then only unsettled invoices will be
// returned, skipping all invoices that are fully settled.
func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) {
var invoices []Invoice
err := d.View(func(tx *bbolt.Tx) error {
invoiceB := tx.Bucket(invoiceBucket)
if invoiceB == nil {
return ErrNoInvoicesCreated
}
// Iterate through the entire key space of the top-level
// invoice bucket. Each key with a non-nil value is an
// invoice ID which maps to the corresponding invoice.
return invoiceB.ForEach(func(k, v []byte) error {
if v == nil {
return nil
}
invoiceReader := bytes.NewReader(v)
invoice, err := deserializeInvoice(invoiceReader)
if err != nil {
return err
}
if pendingOnly &&
invoice.Terms.State == ContractSettled {
return nil
}
invoices = append(invoices, invoice)
return nil
})
})
if err != nil {
return nil, err
}
return invoices, nil
}
// serializeInvoice serializes an invoice to a writer.
//
// Note: this function is in use for a migration. Before making changes that
// would modify the on disk format, make a copy of the original code and store
// it with the migration.
func serializeInvoice(w io.Writer, i *Invoice) error {
if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil {
return err
}
if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil {
return err
}
if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil {
return err
}
if err := binary.Write(w, byteOrder, i.FinalCltvDelta); err != nil {
return err
}
if err := binary.Write(w, byteOrder, int64(i.Expiry)); err != nil {
return err
}
birthBytes, err := i.CreationDate.MarshalBinary()
if err != nil {
return err
}
if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil {
return err
}
settleBytes, err := i.SettleDate.MarshalBinary()
if err != nil {
return err
}
if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil {
return err
}
if _, err := w.Write(i.Terms.PaymentPreimage[:]); err != nil {
return err
}
var scratch [8]byte
byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value))
if _, err := w.Write(scratch[:]); err != nil {
return err
}
if err := binary.Write(w, byteOrder, i.Terms.State); err != nil {
return err
}
if err := binary.Write(w, byteOrder, i.AddIndex); err != nil {
return err
}
if err := binary.Write(w, byteOrder, i.SettleIndex); err != nil {
return err
}
if err := binary.Write(w, byteOrder, int64(i.AmtPaid)); err != nil {
return err
}
if err := serializeHtlcs(w, i.Htlcs); err != nil {
return err
}
return nil
}
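// The invoice record written above is laid out as:
//
//	memo (varbytes) || receipt (varbytes) || paymentRequest (varbytes) ||
//	finalCltvDelta (4) || expiry (8) || creationDate (varbytes) ||
//	settleDate (varbytes) || preimage (32) || value (8) || state (1) ||
//	addIndex (8) || settleIndex (8) || amtPaid (8) || htlcs
//
// with the htlc encoding handled by serializeHtlcs below.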
// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to
// a writer.
func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error {
for key, htlc := range htlcs {
// Encode the htlc in a tlv stream.
chanID := key.ChanID.ToUint64()
amt := uint64(htlc.Amt)
acceptTime := uint64(htlc.AcceptTime.UnixNano())
resolveTime := uint64(htlc.ResolveTime.UnixNano())
state := uint8(htlc.State)
tlvStream, err := tlv.NewStream(
tlv.MakePrimitiveRecord(chanIDType, &chanID),
tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
tlv.MakePrimitiveRecord(amtType, &amt),
tlv.MakePrimitiveRecord(
acceptHeightType, &htlc.AcceptHeight,
),
tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
tlv.MakePrimitiveRecord(stateType, &state),
)
if err != nil {
return err
}
var b bytes.Buffer
if err := tlvStream.Encode(&b); err != nil {
return err
}
// Write the length of the tlv stream followed by the stream
// bytes.
err = binary.Write(w, byteOrder, uint64(b.Len()))
if err != nil {
return err
}
if _, err := w.Write(b.Bytes()); err != nil {
return err
}
}
return nil
}
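// Each htlc is therefore framed on disk as:
//
//	streamLen (8 bytes) || tlv stream (streamLen bytes)
//
// which is exactly the framing that deserializeHtlcs undoes further below.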
func deserializeInvoice(r io.Reader) (Invoice, error) {
var err error
invoice := Invoice{}
// TODO(roasbeef): use read full everywhere
invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "")
if err != nil {
return invoice, err
}
invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "")
if err != nil {
return invoice, err
}
invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "")
if err != nil {
return invoice, err
}
if err := binary.Read(r, byteOrder, &invoice.FinalCltvDelta); err != nil {
return invoice, err
}
var expiry int64
if err := binary.Read(r, byteOrder, &expiry); err != nil {
return invoice, err
}
invoice.Expiry = time.Duration(expiry)
birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth")
if err != nil {
return invoice, err
}
if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil {
return invoice, err
}
settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled")
if err != nil {
return invoice, err
}
if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil {
return invoice, err
}
if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil {
return invoice, err
}
var scratch [8]byte
if _, err := io.ReadFull(r, scratch[:]); err != nil {
return invoice, err
}
invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil {
return invoice, err
}
if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil {
return invoice, err
}
if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil {
return invoice, err
}
if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil {
return invoice, err
}
invoice.Htlcs, err = deserializeHtlcs(r)
if err != nil {
return Invoice{}, err
}
return invoice, nil
}
// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it
// as a map.
func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) {
htlcs := make(map[CircuitKey]*InvoiceHTLC, 0)
for {
// Read the length of the tlv stream for this htlc.
var streamLen uint64
if err := binary.Read(r, byteOrder, &streamLen); err != nil {
if err == io.EOF {
break
}
return nil, err
}
streamBytes := make([]byte, streamLen)
if _, err := r.Read(streamBytes); err != nil {
return nil, err
}
streamReader := bytes.NewReader(streamBytes)
// Decode the contents into the htlc fields.
var (
htlc InvoiceHTLC
key CircuitKey
chanID uint64
state uint8
acceptTime, resolveTime uint64
amt uint64
)
tlvStream, err := tlv.NewStream(
tlv.MakePrimitiveRecord(chanIDType, &chanID),
tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID),
tlv.MakePrimitiveRecord(amtType, &amt),
tlv.MakePrimitiveRecord(
acceptHeightType, &htlc.AcceptHeight,
),
tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime),
tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime),
tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry),
tlv.MakePrimitiveRecord(stateType, &state),
)
if err != nil {
return nil, err
}
if err := tlvStream.Decode(streamReader); err != nil {
return nil, err
}
key.ChanID = lnwire.NewShortChanIDFromInt(chanID)
htlc.AcceptTime = time.Unix(0, int64(acceptTime))
htlc.ResolveTime = time.Unix(0, int64(resolveTime))
htlc.State = HtlcState(state)
htlc.Amt = lnwire.MilliSatoshi(amt)
htlcs[key] = &htlc
}
return htlcs, nil
}

@ -0,0 +1,55 @@
package migration_01_to_11
import (
"io"
)
// deserializeCloseChannelSummaryV6 reads the v6 database format for
// ChannelCloseSummary.
//
// NOTE: deprecated, only for migration.
func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, error) {
c := &ChannelCloseSummary{}
err := ReadElements(r,
&c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID,
&c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance,
&c.TimeLockedBalance, &c.CloseType, &c.IsPending,
)
if err != nil {
return nil, err
}
// We'll now check to see if the channel close summary was encoded with
// any of the additional optional fields.
err = ReadElements(r, &c.RemoteCurrentRevocation)
switch {
case err == io.EOF:
return c, nil
// If we got a non-eof error, then we know there's an actual issue.
// Otherwise, it may have been the case that this summary didn't have
// the set of optional fields.
case err != nil:
return nil, err
}
if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
return nil, err
}
// Finally, we'll attempt to read the next unrevoked commitment point
// for the remote party. If we closed the channel before receiving a
// funding locked message, then this can be nil. As a result, we'll use
// the same technique to read the field, only if there's still data
// left in the buffer.
err = ReadElements(r, &c.RemoteNextRevocation)
if err != nil && err != io.EOF {
// If we got a non-eof error, then we know there's an actual
// issue. Otherwise, it may have been the case that this
// summary didn't have the set of optional fields.
return nil, err
}
return c, nil
}
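// The optional-field pattern used above, distilled into a sketch;
// readOptional is a hypothetical helper. The idea is to attempt the read and
// treat a clean EOF as "field absent" rather than as a failure.
func readOptional(r io.Reader, field interface{}) (bool, error) {
	switch err := ReadElements(r, field); {
	case err == io.EOF:
		return false, nil
	case err != nil:
		return false, err
	}
	return true, nil
}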

@ -0,0 +1,14 @@
package migration_01_to_11
import (
"github.com/btcsuite/btclog"
)
// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled
// UseLogger uses a specified Logger to output package logging info.
func UseLogger(logger btclog.Logger) {
log = logger
}

@ -0,0 +1,37 @@
package migration_01_to_11
import "github.com/coreos/bbolt"
var (
// metaBucket stores all the meta information concerning the state of
// the database.
metaBucket = []byte("metadata")
// dbVersionKey is a boltdb key and it's used for storing/retrieving
// the current database version.
dbVersionKey = []byte("dbp")
)
// Meta structure holds the database meta information.
type Meta struct {
// DbVersionNumber is the current schema version of the database.
DbVersionNumber uint32
}
// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket)
if err != nil {
return err
}
return putDbVersion(metaBucket, meta)
}
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error {
scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch)
}
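// A sketch of the read side, mirroring putMeta above; fetchMeta is a
// hypothetical helper. A missing bucket or key reads back as version 0.
func fetchMeta(tx *bbolt.Tx) (*Meta, error) {
	meta := &Meta{}
	bucket := tx.Bucket(metaBucket)
	if bucket == nil {
		return meta, nil
	}
	if data := bucket.Get(dbVersionKey); data != nil {
		meta.DbVersionNumber = byteOrder.Uint32(data)
	}
	return meta, nil
}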

@ -0,0 +1,58 @@
package migration_01_to_11
import (
"testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
)
// applyMigration is a helper test function that encapsulates the general steps
// which are needed to properly check the result of applying a migration function.
func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
migrationFunc migration, shouldFail bool) {
cdb, cleanUp, err := makeTestDB()
defer cleanUp()
if err != nil {
t.Fatal(err)
}
// Create a test node that will be our source node.
testNode, err := createTestVertex(cdb)
if err != nil {
t.Fatal(err)
}
graph := cdb.ChannelGraph()
if err := graph.SetSourceNode(testNode); err != nil {
t.Fatal(err)
}
// beforeMigration is usually used for populating the database
// with test data.
beforeMigration(cdb)
defer func() {
if r := recover(); r != nil {
err = errors.New(r)
}
if err == nil && shouldFail {
t.Fatal("error wasn't received on migration stage")
} else if err != nil && !shouldFail {
t.Fatalf("error was received on migration stage: %v", err)
}
// afterMigration is usually used for checking the database state
// and raising an error if something went wrong.
afterMigration(cdb)
}()
// Apply migration.
err = cdb.Update(func(tx *bbolt.Tx) error {
return migrationFunc(tx)
})
if err != nil {
log.Error(err)
}
}

@ -1,4 +1,4 @@
package channeldb
package migration_01_to_11
import (
"bytes"

@ -1,4 +1,4 @@
package channeldb
package migration_01_to_11
import (
"bytes"
@ -8,10 +8,10 @@ import (
"github.com/lightningnetwork/lnd/routing/route"
)
// migrateRouteSerialization migrates the way we serialize routes across the
// MigrateRouteSerialization migrates the way we serialize routes across the
// entire database. At the time of writing of this migration, this includes our
// payment attempts, as well as the payment results in mission control.
func migrateRouteSerialization(tx *bbolt.Tx) error {
func MigrateRouteSerialization(tx *bbolt.Tx) error {
// First, we'll do all the payment attempts.
rootPaymentBucket := tx.Bucket(paymentsRootBucket)
if rootPaymentBucket == nil {

@ -1,4 +1,4 @@
package channeldb
package migration_01_to_11
import (
"bytes"
@ -14,9 +14,9 @@ import (
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
)
// migrateInvoices adds invoice htlcs and a separate cltv delta field to the
// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the
// invoices.
func migrateInvoices(tx *bbolt.Tx) error {
func MigrateInvoices(tx *bbolt.Tx) error {
log.Infof("Migrating invoices to new invoice format")
invoiceB := tx.Bucket(invoiceBucket)

@ -1,4 +1,4 @@
package channeldb
package migration_01_to_11
import (
"bytes"
@ -88,15 +88,6 @@ func TestMigrateInvoices(t *testing.T) {
// Verify that all invoices were migrated.
afterMigrationFunc := func(d *DB) {
meta, err := d.FetchMeta(nil)
if err != nil {
t.Fatal(err)
}
if meta.DbVersionNumber != 1 {
t.Fatal("migration 'invoices' wasn't applied")
}
dbInvoices, err := d.FetchAllInvoices(false)
if err != nil {
t.Fatalf("unable to fetch invoices: %v", err)
@ -123,7 +114,7 @@ func TestMigrateInvoices(t *testing.T) {
applyMigration(t,
func(d *DB) { beforeMigrationFuncV11(t, d, invoices) },
afterMigrationFunc,
migrateInvoices,
MigrateInvoices,
false)
}
@ -149,7 +140,7 @@ func TestMigrateInvoicesHodl(t *testing.T) {
applyMigration(t,
func(d *DB) { beforeMigrationFuncV11(t, d, invoices) },
func(d *DB) {},
migrateInvoices,
MigrateInvoices,
true)
}

@ -1,4 +1,4 @@
package channeldb
package migration_01_to_11
import (
"bytes"
@ -12,12 +12,12 @@ import (
"github.com/lightningnetwork/lnd/routing/route"
)
// migrateNodeAndEdgeUpdateIndex is a migration function that will update the
// MigrateNodeAndEdgeUpdateIndex is a migration function that will update the
// database from version 0 to version 1. In version 1, we add two new indexes
// (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement the
// new graph sync protocol added.
func migrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// First, we'll populate the node portion of the new index. Before we
// can add new values to the index, we'll first create the new bucket
// where these items will be housed.
@ -118,11 +118,11 @@ func migrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
return nil
}
// migrateInvoiceTimeSeries is a database migration that assigns all existing
// MigrateInvoiceTimeSeries is a database migration that assigns all existing
// invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid.
func migrateInvoiceTimeSeries(tx *bbolt.Tx) error {
func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
if err != nil {
return err
@ -255,11 +255,11 @@ func migrateInvoiceTimeSeries(tx *bbolt.Tx) error {
return nil
}
// migrateInvoiceTimeSeriesOutgoingPayments is a follow up to the
// MigrateInvoiceTimeSeriesOutgoingPayments is a follow up to the
// MigrateInvoiceTimeSeries migration. As of the time of writing, the
// OutgoingPayment struct embeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format.
func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
payBucket := tx.Bucket(paymentBucket)
if payBucket == nil {
return nil
@ -336,11 +336,11 @@ func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
return nil
}
// migrateEdgePolicies is a migration function that will update the edges
// MigrateEdgePolicies is a migration function that will update the edges
// bucket. It ensures that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known.
func migrateEdgePolicies(tx *bbolt.Tx) error {
func MigrateEdgePolicies(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
if nodes == nil {
return nil
@ -409,10 +409,10 @@ func migrateEdgePolicies(tx *bbolt.Tx) error {
return nil
}
// paymentStatusesMigration is a database migration intended for adding payment
// PaymentStatusesMigration is a database migration intended for adding payment
// statuses for each existing payment entity in the bucket, to be able to
// control status transitions and prevent cases such as double payment.
func paymentStatusesMigration(tx *bbolt.Tx) error {
func PaymentStatusesMigration(tx *bbolt.Tx) error {
// Get the bucket dedicated to storing statuses of payments,
// where a key is payment hash, value is payment status.
paymentStatuses, err := tx.CreateBucketIfNotExists(paymentStatusBucket)
@ -492,14 +492,14 @@ func paymentStatusesMigration(tx *bbolt.Tx) error {
return nil
}
// migratePruneEdgeUpdateIndex is a database migration that attempts to resolve
// MigratePruneEdgeUpdateIndex is a database migration that attempts to resolve
// some lingering bugs with regards to edge policies and their update index.
// Stale entries within the edge update index were not being properly pruned due
// to a miscalculation on the offset of an edge's policy last update. This
// migration also fixes the case where the public keys within edge policies were
// being serialized with an extra byte, causing an even greater error when
// attempting to perform the offset calculation described earlier.
func migratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
// To begin the migration, we'll retrieve the update index bucket. If it
// does not exist, we have nothing left to do so we can simply exit.
edges := tx.Bucket(edgeBucket)
@ -610,10 +610,10 @@ func migratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
return nil
}
// migrateOptionalChannelCloseSummaryFields migrates the serialized format of
// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is indicated
// with boolean markers.
func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
func MigrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
closedChanBucket := tx.Bucket(closedChannelBucket)
if closedChanBucket == nil {
return nil
@ -669,10 +669,10 @@ func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
var messageStoreBucket = []byte("message-store")
// migrateGossipMessageStoreKeys migrates the key format for gossip messages
// MigrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration
// the type of the message being stored.
func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
// We'll start by retrieving the bucket in which these messages are
// stored within. If there isn't one, there's nothing left for us to do
// so we can avoid the migration.
@ -739,7 +739,7 @@ func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
return nil
}
// migrateOutgoingPayments moves the OutgoingPayments into a new bucket format
// MigrateOutgoingPayments moves the OutgoingPayments into a new bucket format
// where they all reside in a top-level bucket indexed by the payment hash. In
// this sub-bucket we store information relevant to this payment, such as the
// payment status.
@ -748,7 +748,7 @@ func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
// InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure.
func migrateOutgoingPayments(tx *bbolt.Tx) error {
func MigrateOutgoingPayments(tx *bbolt.Tx) error {
log.Infof("Migrating outgoing payments to new bucket structure")
oldPayments := tx.Bucket(paymentBucket)

@ -1,4 +1,4 @@
package channeldb
package migration_01_to_11
import (
"bytes"
@ -135,15 +135,6 @@ func TestPaymentStatusesMigration(t *testing.T) {
// Verify that the created payment status is "Completed" for our one
// fake payment.
afterMigrationFunc := func(d *DB) {
meta, err := d.FetchMeta(nil)
if err != nil {
t.Fatal(err)
}
if meta.DbVersionNumber != 1 {
t.Fatal("migration 'paymentStatusesMigration' wasn't applied")
}
// Check that our completed payments were migrated.
paymentStatus, err := d.fetchPaymentStatus(paymentHash)
if err != nil {
@ -197,7 +188,7 @@ func TestPaymentStatusesMigration(t *testing.T) {
applyMigration(t,
beforeMigrationFunc,
afterMigrationFunc,
paymentStatusesMigration,
PaymentStatusesMigration,
false)
}
@ -404,15 +395,6 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
// After the migration it should be found in the new format.
afterMigrationFunc := func(d *DB) {
meta, err := d.FetchMeta(nil)
if err != nil {
t.Fatal(err)
}
if meta.DbVersionNumber != 1 {
t.Fatal("migration wasn't applied")
}
// We generate the new serialized version, to check
// against what is found in the DB.
var b bytes.Buffer
@ -469,7 +451,7 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
applyMigration(t,
beforeMigrationFunc,
afterMigrationFunc,
migrateOptionalChannelCloseSummaryFields,
MigrateOptionalChannelCloseSummaryFields,
false)
}
}
@ -521,16 +503,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
// 2. We can find the message under its new key.
// 3. The message matches the original.
afterMigration := func(db *DB) {
meta, err := db.FetchMeta(nil)
if err != nil {
t.Fatalf("unable to fetch db version: %v", err)
}
if meta.DbVersionNumber != 1 {
t.Fatalf("migration should have succeeded but didn't")
}
var rawMsg []byte
err = db.View(func(tx *bbolt.Tx) error {
err := db.View(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
if messageStore == nil {
return errors.New("message store bucket not " +
@ -565,7 +539,7 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
applyMigration(
t, beforeMigration, afterMigration,
migrateGossipMessageStoreKeys, false,
MigrateGossipMessageStoreKeys, false,
)
}
@ -617,15 +591,6 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
// Verify that all payments were migrated.
afterMigrationFunc := func(d *DB) {
meta, err := d.FetchMeta(nil)
if err != nil {
t.Fatal(err)
}
if meta.DbVersionNumber != 1 {
t.Fatal("migration 'paymentStatusesMigration' wasn't applied")
}
sentPayments, err := d.fetchPaymentsMigration9()
if err != nil {
t.Fatalf("unable to fetch sent payments: %v", err)
@ -724,7 +689,7 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
applyMigration(t,
beforeMigrationFunc,
afterMigrationFunc,
migrateOutgoingPayments,
MigrateOutgoingPayments,
false)
}
@ -947,6 +912,17 @@ func TestPaymentRouteSerialization(t *testing.T) {
applyMigration(t,
beforeMigrationFunc,
afterMigrationFunc,
migrateRouteSerialization,
MigrateRouteSerialization,
false)
}
// TestNotCoveredMigrations only references migrations that are not referenced
// anywhere else in this package. This prevents false positives when linting
// with unused.
func TestNotCoveredMigrations(t *testing.T) {
_ = MigrateNodeAndEdgeUpdateIndex
_ = MigrateInvoiceTimeSeries
_ = MigrateInvoiceTimeSeriesOutgoingPayments
_ = MigrateEdgePolicies
_ = MigratePruneEdgeUpdateIndex
}

@ -0,0 +1,41 @@
package migration_01_to_11
const (
// DefaultRejectCacheSize is the default number of rejectCacheEntries to
// cache for use in the rejection cache of incoming gossip traffic. This
// produces a cache size of around 1MB.
DefaultRejectCacheSize = 50000
// DefaultChannelCacheSize is the default number of ChannelEdges cached
// in order to reply to gossip queries. This produces a cache size of
// around 40MB.
DefaultChannelCacheSize = 20000
)
// Options holds parameters for tuning and customizing a channeldb.DB.
type Options struct {
// RejectCacheSize is the maximum number of rejectCacheEntries to hold
// in the rejection cache.
RejectCacheSize int
// ChannelCacheSize is the maximum number of ChannelEdges to hold in the
// channel cache.
ChannelCacheSize int
// NoFreelistSync, if true, prevents the database from syncing its
// freelist to disk, resulting in improved performance at the expense of
// increased startup time.
NoFreelistSync bool
}
// DefaultOptions returns an Options populated with default values.
func DefaultOptions() Options {
return Options{
RejectCacheSize: DefaultRejectCacheSize,
ChannelCacheSize: DefaultChannelCacheSize,
NoFreelistSync: true,
}
}
// OptionModifier is a function signature for modifying the default Options.
type OptionModifier func(*Options)
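
Concrete modifiers are not shown in this hunk. One hedged example of the functional-options pattern the type enables (the name WithRejectCacheSize is an assumption, not taken from this diff):

// WithRejectCacheSize is an illustrative OptionModifier that
// overrides the rejection cache size.
func WithRejectCacheSize(n int) OptionModifier {
	return func(o *Options) {
		o.RejectCacheSize = n
	}
}

A caller would start from DefaultOptions and apply modifiers in order, e.g. opts := DefaultOptions(); WithRejectCacheSize(100000)(&opts).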

@ -0,0 +1,23 @@
package migration_01_to_11
import (
"github.com/coreos/bbolt"
)
// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) PaymentStatus {
if bucket.Get(paymentSettleInfoKey) != nil {
return StatusSucceeded
}
if bucket.Get(paymentFailInfoKey) != nil {
return StatusFailed
}
if bucket.Get(paymentCreationInfoKey) != nil {
return StatusInFlight
}
return StatusUnknown
}
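
fetchPaymentStatus expects an already-open payment sub-bucket. A minimal sketch of a caller in the same package, looking up one payment by hash inside a read transaction (statusForHash is an illustrative helper; paymentsRootBucket is the bucket defined later in this diff):

// statusForHash resolves the status of a single payment by its
// hash. Illustrative only; not part of this changeset.
func statusForHash(db *bbolt.DB, hash []byte) (PaymentStatus, error) {
	status := StatusUnknown
	err := db.View(func(tx *bbolt.Tx) error {
		root := tx.Bucket(paymentsRootBucket)
		if root == nil {
			return nil
		}
		if bucket := root.Bucket(hash); bucket != nil {
			status = fetchPaymentStatus(bucket)
		}
		return nil
	})
	return status, err
}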

@ -0,0 +1,627 @@
package migration_01_to_11
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"sort"
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/tlv"
)
var (
// paymentsRootBucket is the name of the top-level bucket within the
// database that stores all data related to payments. Within this
// bucket, each payment hash has its own sub-bucket keyed by the payment
// hash.
//
// Bucket hierarchy:
//
// root-bucket
// |
// |-- <paymenthash>
// | |--sequence-key: <sequence number>
// | |--creation-info-key: <creation info>
// | |--attempt-info-key: <attempt info>
// | |--settle-info-key: <settle info>
// | |--fail-info-key: <fail info>
// | |
// | |--duplicate-bucket (only for old, completed payments)
// | |
// | |-- <seq-num>
// | | |--sequence-key: <sequence number>
// | | |--creation-info-key: <creation info>
// | | |--attempt-info-key: <attempt info>
// | | |--settle-info-key: <settle info>
// | | |--fail-info-key: <fail info>
// | |
// | |-- <seq-num>
// | | |
// | ... ...
// |
// |-- <paymenthash>
// | |
// | ...
// ...
//
paymentsRootBucket = []byte("payments-root-bucket")
// paymentDuplicateBucket is the name of an optional sub-bucket within
// the payment hash bucket, that is used to hold duplicate payments to
// a payment hash. This is needed to support information from earlier
// versions of lnd, where it was possible to pay to a payment hash more
// than once.
paymentDuplicateBucket = []byte("payment-duplicate-bucket")
// paymentSequenceKey is a key used in the payment's sub-bucket to
// store the sequence number of the payment.
paymentSequenceKey = []byte("payment-sequence-key")
// paymentCreationInfoKey is a key used in the payment's sub-bucket to
// store the creation info of the payment.
paymentCreationInfoKey = []byte("payment-creation-info")
// paymentAttemptInfoKey is a key used in the payment's sub-bucket to
// store the info about the latest attempt that was done for the
// payment in question.
paymentAttemptInfoKey = []byte("payment-attempt-info")
// paymentSettleInfoKey is a key used in the payment's sub-bucket to
// store the settle info of the payment.
paymentSettleInfoKey = []byte("payment-settle-info")
// paymentFailInfoKey is a key used in the payment's sub-bucket to
// store information about the reason a payment failed.
paymentFailInfoKey = []byte("payment-fail-info")
)
// FailureReason encodes the reason a payment ultimately failed.
type FailureReason byte
const (
// FailureReasonTimeout indicates that the payment did timeout before a
// successful payment attempt was made.
FailureReasonTimeout FailureReason = 0
// FailureReasonNoRoute indicates no successful route to the
// destination was found during path finding.
FailureReasonNoRoute FailureReason = 1
// FailureReasonError indicates that an unexpected error happened during
// payment.
FailureReasonError FailureReason = 2
// FailureReasonIncorrectPaymentDetails indicates that either the hash
// is unknown or the final cltv delta or amount is incorrect.
FailureReasonIncorrectPaymentDetails FailureReason = 3
// TODO(halseth): cancel state.
// TODO(joostjager): Add failure reasons for:
// LocalLiquidityInsufficient, RemoteCapacityInsufficient.
)
// String returns a human-readable FailureReason.
func (r FailureReason) String() string {
switch r {
case FailureReasonTimeout:
return "timeout"
case FailureReasonNoRoute:
return "no_route"
case FailureReasonError:
return "error"
case FailureReasonIncorrectPaymentDetails:
return "incorrect_payment_details"
}
return "unknown"
}
// PaymentStatus represents the current status of a payment.
type PaymentStatus byte
const (
// StatusUnknown is the status where a payment has never been initiated
// and hence is unknown.
StatusUnknown PaymentStatus = 0
// StatusInFlight is the status where a payment has been initiated, but
// a response has not been received.
StatusInFlight PaymentStatus = 1
// StatusSucceeded is the status where a payment has been initiated and
// the payment was completed successfully.
StatusSucceeded PaymentStatus = 2
// StatusFailed is the status where a payment has been initiated and a
// failure result has come back.
StatusFailed PaymentStatus = 3
)
// Bytes returns the status as a slice of bytes.
func (ps PaymentStatus) Bytes() []byte {
return []byte{byte(ps)}
}
// FromBytes sets the status from a slice of bytes.
func (ps *PaymentStatus) FromBytes(status []byte) error {
if len(status) != 1 {
return errors.New("payment status is empty")
}
switch PaymentStatus(status[0]) {
case StatusUnknown, StatusInFlight, StatusSucceeded, StatusFailed:
*ps = PaymentStatus(status[0])
default:
return errors.New("unknown payment status")
}
return nil
}
// String returns readable representation of payment status.
func (ps PaymentStatus) String() string {
switch ps {
case StatusUnknown:
return "Unknown"
case StatusInFlight:
return "In Flight"
case StatusSucceeded:
return "Succeeded"
case StatusFailed:
return "Failed"
default:
return "Unknown"
}
}
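
Bytes and FromBytes are intended as a one-byte round trip. A quick hedged sketch of that cycle (roundTripStatus is an illustrative helper, not part of this changeset):

// roundTripStatus encodes a status and decodes it back, returning
// an error for any byte that isn't a known status.
func roundTripStatus(in PaymentStatus) (PaymentStatus, error) {
	var out PaymentStatus
	if err := out.FromBytes(in.Bytes()); err != nil {
		return StatusUnknown, err
	}
	return out, nil
}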
// PaymentCreationInfo is the information necessary to have ready when
// initiating a payment, moving it into state InFlight.
type PaymentCreationInfo struct {
// PaymentHash is the hash this payment is paying to.
PaymentHash lntypes.Hash
// Value is the amount we are paying.
Value lnwire.MilliSatoshi
// CreationDate is the time when this payment was initiated.
CreationDate time.Time
// PaymentRequest is the full payment request, if any.
PaymentRequest []byte
}
// PaymentAttemptInfo contains information about a specific payment attempt for
// a given payment. This information is used by the router to handle any errors
// coming back after an attempt is made, and to query the switch about the
// status of a payment. For settled payments this will be the information
// for the succeeding payment attempt.
type PaymentAttemptInfo struct {
// PaymentID is the unique ID used for this attempt.
PaymentID uint64
// SessionKey is the ephemeral key used for this payment attempt.
SessionKey *btcec.PrivateKey
// Route is the route attempted to send the HTLC.
Route route.Route
}
// Payment is a wrapper around a payment's PaymentCreationInfo,
// PaymentAttemptInfo, and preimage. All payments will have the
// PaymentCreationInfo set, the PaymentAttemptInfo will be set only if at least
// one payment attempt has been made, while only completed payments will have a
// non-zero payment preimage.
type Payment struct {
// sequenceNum is a unique identifier used to sort the payments in
// order of creation.
sequenceNum uint64
// Status is the current PaymentStatus of this payment.
Status PaymentStatus
// Info holds all static information about this payment, and is
// populated when the payment is initiated.
Info *PaymentCreationInfo
// Attempt is the information about the last payment attempt made.
//
// NOTE: Can be nil if no attempt is yet made.
Attempt *PaymentAttemptInfo
// PaymentPreimage is the preimage of a successful payment. This serves
// as a proof of payment. It will only be non-nil for settled payments.
//
// NOTE: Can be nil if payment is not settled.
PaymentPreimage *lntypes.Preimage
// Failure is a failure reason code indicating the reason the payment
// failed. It is only non-nil for failed payments.
//
// NOTE: Can be nil if payment is not failed.
Failure *FailureReason
}
// FetchPayments returns all sent payments found in the DB.
func (db *DB) FetchPayments() ([]*Payment, error) {
var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
return fmt.Errorf("non bucket element in " +
"payments bucket")
}
p, err := fetchPayment(bucket)
if err != nil {
return err
}
payments = append(payments, p)
// For older versions of lnd, duplicate payments to a
// payment hash were possible. These will be found in a
// sub-bucket indexed by their sequence number if
// available.
dup := bucket.Bucket(paymentDuplicateBucket)
if dup == nil {
return nil
}
return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
if subBucket == nil {
// We expect one sub-bucket for each
// duplicate to be found.
return fmt.Errorf("non bucket element " +
"in duplicate bucket")
}
p, err := fetchPayment(subBucket)
if err != nil {
return err
}
payments = append(payments, p)
return nil
})
})
})
if err != nil {
return nil, err
}
// Before returning, sort the payments by their sequence number.
sort.Slice(payments, func(i, j int) bool {
return payments[i].sequenceNum < payments[j].sequenceNum
})
return payments, nil
}
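
FetchPayments is the exported read path over this bucket hierarchy. A hedged usage sketch (listPaymentStatuses is an illustrative helper; the *DB value is assumed to come from this package's open helpers):

// listPaymentStatuses prints one line per stored payment, in
// sequence order. Illustrative only.
func listPaymentStatuses(db *DB) error {
	payments, err := db.FetchPayments()
	if err != nil {
		return err
	}
	for _, p := range payments {
		fmt.Printf("%x: %v\n", p.Info.PaymentHash, p.Status)
	}
	return nil
}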
func fetchPayment(bucket *bbolt.Bucket) (*Payment, error) {
var (
err error
p = &Payment{}
)
seqBytes := bucket.Get(paymentSequenceKey)
if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found")
}
p.sequenceNum = binary.BigEndian.Uint64(seqBytes)
// Get the payment status.
p.Status = fetchPaymentStatus(bucket)
// Get the PaymentCreationInfo.
b := bucket.Get(paymentCreationInfoKey)
if b == nil {
return nil, fmt.Errorf("creation info not found")
}
r := bytes.NewReader(b)
p.Info, err = deserializePaymentCreationInfo(r)
if err != nil {
return nil, err
}
// Get the PaymentAttemptInfo. This can be unset.
b = bucket.Get(paymentAttemptInfoKey)
if b != nil {
r = bytes.NewReader(b)
p.Attempt, err = deserializePaymentAttemptInfo(r)
if err != nil {
return nil, err
}
}
// Get the payment preimage. This is only found for
// completed payments.
b = bucket.Get(paymentSettleInfoKey)
if b != nil {
var preimg lntypes.Preimage
copy(preimg[:], b[:])
p.PaymentPreimage = &preimg
}
// Get failure reason if available.
b = bucket.Get(paymentFailInfoKey)
if b != nil {
reason := FailureReason(b[0])
p.Failure = &reason
}
return p, nil
}
func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error {
var scratch [8]byte
if _, err := w.Write(c.PaymentHash[:]); err != nil {
return err
}
byteOrder.PutUint64(scratch[:], uint64(c.Value))
if _, err := w.Write(scratch[:]); err != nil {
return err
}
byteOrder.PutUint64(scratch[:], uint64(c.CreationDate.Unix()))
if _, err := w.Write(scratch[:]); err != nil {
return err
}
byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest)))
if _, err := w.Write(scratch[:4]); err != nil {
return err
}
if _, err := w.Write(c.PaymentRequest[:]); err != nil {
return err
}
return nil
}
func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) {
var scratch [8]byte
c := &PaymentCreationInfo{}
if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil {
return nil, err
}
if _, err := io.ReadFull(r, scratch[:]); err != nil {
return nil, err
}
c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:]))
if _, err := io.ReadFull(r, scratch[:]); err != nil {
return nil, err
}
c.CreationDate = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0)
if _, err := io.ReadFull(r, scratch[:4]); err != nil {
return nil, err
}
reqLen := uint32(byteOrder.Uint32(scratch[:4]))
payReq := make([]byte, reqLen)
if reqLen > 0 {
if _, err := io.ReadFull(r, payReq[:]); err != nil {
return nil, err
}
}
c.PaymentRequest = payReq
return c, nil
}
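
serializePaymentCreationInfo and deserializePaymentCreationInfo are meant to be exact inverses. A hedged round-trip sketch in the same package (creationInfoRoundTrip is an illustrative name):

// creationInfoRoundTrip serializes a PaymentCreationInfo and
// immediately deserializes it again. Illustrative only.
func creationInfoRoundTrip(info *PaymentCreationInfo) (*PaymentCreationInfo, error) {
	var buf bytes.Buffer
	if err := serializePaymentCreationInfo(&buf, info); err != nil {
		return nil, err
	}
	return deserializePaymentCreationInfo(&buf)
}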
func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) error {
if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil {
return err
}
if err := SerializeRoute(w, a.Route); err != nil {
return err
}
return nil
}
func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, error) {
a := &PaymentAttemptInfo{}
err := ReadElements(r, &a.PaymentID, &a.SessionKey)
if err != nil {
return nil, err
}
a.Route, err = DeserializeRoute(r)
if err != nil {
return nil, err
}
return a, nil
}
func serializeHop(w io.Writer, h *route.Hop) error {
if err := WriteElements(w,
h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock,
h.AmtToForward,
); err != nil {
return err
}
if err := binary.Write(w, byteOrder, h.LegacyPayload); err != nil {
return err
}
// For legacy payloads, we don't need to write any TLV records, so
// we'll write a zero indicating that our serialized TLV map has no
// records.
if h.LegacyPayload {
return WriteElements(w, uint32(0))
}
// Otherwise, we'll transform our slice of records into a map of the
// raw bytes, then serialize them in-line with a length (number of
// elements) prefix.
mapRecords, err := tlv.RecordsToMap(h.TLVRecords)
if err != nil {
return err
}
numRecords := uint32(len(mapRecords))
if err := WriteElements(w, numRecords); err != nil {
return err
}
for recordType, rawBytes := range mapRecords {
if err := WriteElements(w, recordType); err != nil {
return err
}
if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil {
return err
}
}
return nil
}
// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need
// to read/write a TLV stream larger than this.
const maxOnionPayloadSize = 1300
func deserializeHop(r io.Reader) (*route.Hop, error) {
h := &route.Hop{}
var pub []byte
if err := ReadElements(r, &pub); err != nil {
return nil, err
}
copy(h.PubKeyBytes[:], pub)
if err := ReadElements(r,
&h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward,
); err != nil {
return nil, err
}
// TODO(roasbeef): change field to allow LegacyPayload false to be the
// legacy default?
err := binary.Read(r, byteOrder, &h.LegacyPayload)
if err != nil {
return nil, err
}
var numElements uint32
if err := ReadElements(r, &numElements); err != nil {
return nil, err
}
// If there are no elements, then we can return early.
if numElements == 0 {
return h, nil
}
tlvMap := make(map[uint64][]byte)
for i := uint32(0); i < numElements; i++ {
var tlvType uint64
if err := ReadElements(r, &tlvType); err != nil {
return nil, err
}
rawRecordBytes, err := wire.ReadVarBytes(
r, 0, maxOnionPayloadSize, "tlv",
)
if err != nil {
return nil, err
}
tlvMap[tlvType] = rawRecordBytes
}
tlvRecords, err := tlv.MapToRecords(tlvMap)
if err != nil {
return nil, err
}
h.TLVRecords = tlvRecords
return h, nil
}
// SerializeRoute serializes a route.
func SerializeRoute(w io.Writer, r route.Route) error {
if err := WriteElements(w,
r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:],
); err != nil {
return err
}
if err := WriteElements(w, uint32(len(r.Hops))); err != nil {
return err
}
for _, h := range r.Hops {
if err := serializeHop(w, h); err != nil {
return err
}
}
return nil
}
// DeserializeRoute deserializes a route.
func DeserializeRoute(r io.Reader) (route.Route, error) {
rt := route.Route{}
if err := ReadElements(r,
&rt.TotalTimeLock, &rt.TotalAmount,
); err != nil {
return rt, err
}
var pub []byte
if err := ReadElements(r, &pub); err != nil {
return rt, err
}
copy(rt.SourcePubKey[:], pub)
var numHops uint32
if err := ReadElements(r, &numHops); err != nil {
return rt, err
}
var hops []*route.Hop
for i := uint32(0); i < numHops; i++ {
hop, err := deserializeHop(r)
if err != nil {
return rt, err
}
hops = append(hops, hop)
}
rt.Hops = hops
return rt, nil
}
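
Likewise, SerializeRoute and DeserializeRoute should round-trip a route byte-for-byte. A minimal hedged sketch (routeRoundTrip is an illustrative helper, not part of this changeset):

// routeRoundTrip writes a route to a buffer and reads it back.
func routeRoundTrip(rt route.Route) (route.Route, error) {
	var buf bytes.Buffer
	if err := SerializeRoute(&buf, rt); err != nil {
		return route.Route{}, err
	}
	return DeserializeRoute(&buf)
}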

@ -0,0 +1,108 @@
package migration_01_to_11
import (
"bytes"
"fmt"
"math/rand"
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/lightningnetwork/lnd/lnwire"
)
var (
priv, _ = btcec.NewPrivateKey(btcec.S256())
pub = priv.PubKey()
)
func makeFakePayment() *outgoingPayment {
fakeInvoice := &Invoice{
// Use single second precision to avoid false positive test
// failures due to the monotonic time component.
CreationDate: time.Unix(time.Now().Unix(), 0),
Memo: []byte("fake memo"),
Receipt: []byte("fake receipt"),
PaymentRequest: []byte(""),
}
copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:])
fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000)
fakePath := make([][33]byte, 3)
for i := 0; i < 3; i++ {
copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33))
}
fakePayment := &outgoingPayment{
Invoice: *fakeInvoice,
Fee: 101,
Path: fakePath,
TimeLockLength: 1000,
}
copy(fakePayment.PaymentPreimage[:], rev[:])
return fakePayment
}
// randomBytes creates random []byte with length in range [minLen, maxLen)
func randomBytes(minLen, maxLen int) ([]byte, error) {
randBuf := make([]byte, minLen+rand.Intn(maxLen-minLen))
if _, err := rand.Read(randBuf); err != nil {
return nil, fmt.Errorf("Internal error. "+
"Cannot generate random string: %v", err)
}
return randBuf, nil
}
func makeRandomFakePayment() (*outgoingPayment, error) {
var err error
fakeInvoice := &Invoice{
// Use single second precision to avoid false positive test
// failures due to the monotonic time component.
CreationDate: time.Unix(time.Now().Unix(), 0),
}
fakeInvoice.Memo, err = randomBytes(1, 50)
if err != nil {
return nil, err
}
fakeInvoice.Receipt, err = randomBytes(1, 50)
if err != nil {
return nil, err
}
fakeInvoice.PaymentRequest, err = randomBytes(1, 50)
if err != nil {
return nil, err
}
preImg, err := randomBytes(32, 33)
if err != nil {
return nil, err
}
copy(fakeInvoice.Terms.PaymentPreimage[:], preImg)
fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000))
fakePathLen := 1 + rand.Intn(5)
fakePath := make([][33]byte, fakePathLen)
for i := 0; i < fakePathLen; i++ {
b, err := randomBytes(33, 34)
if err != nil {
return nil, err
}
copy(fakePath[i][:], b)
}
fakePayment := &outgoingPayment{
Invoice: *fakeInvoice,
Fee: lnwire.MilliSatoshi(rand.Intn(1001)),
Path: fakePath,
TimeLockLength: uint32(rand.Intn(10000)),
}
copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:])
return fakePayment, nil
}

@ -12,7 +12,6 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/tlv"
)
@ -53,34 +52,6 @@ var (
}
)
func makeFakePayment() *outgoingPayment {
fakeInvoice := &Invoice{
// Use single second precision to avoid false positive test
// failures due to the monotonic time component.
CreationDate: time.Unix(time.Now().Unix(), 0),
Memo: []byte("fake memo"),
Receipt: []byte("fake receipt"),
PaymentRequest: []byte(""),
}
copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:])
fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000)
fakePath := make([][33]byte, 3)
for i := 0; i < 3; i++ {
copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33))
}
fakePayment := &outgoingPayment{
Invoice: *fakeInvoice,
Fee: 101,
Path: fakePath,
TimeLockLength: 1000,
}
copy(fakePayment.PaymentPreimage[:], rev[:])
return fakePayment
}
func makeFakeInfo() (*PaymentCreationInfo, *PaymentAttemptInfo) {
var preimg lntypes.Preimage
copy(preimg[:], rev[:])
@ -114,58 +85,6 @@ func randomBytes(minLen, maxLen int) ([]byte, error) {
return randBuf, nil
}
func makeRandomFakePayment() (*outgoingPayment, error) {
var err error
fakeInvoice := &Invoice{
// Use single second precision to avoid false positive test
// failures due to the monotonic time component.
CreationDate: time.Unix(time.Now().Unix(), 0),
}
fakeInvoice.Memo, err = randomBytes(1, 50)
if err != nil {
return nil, err
}
fakeInvoice.Receipt, err = randomBytes(1, 50)
if err != nil {
return nil, err
}
fakeInvoice.PaymentRequest, err = randomBytes(1, 50)
if err != nil {
return nil, err
}
preImg, err := randomBytes(32, 33)
if err != nil {
return nil, err
}
copy(fakeInvoice.Terms.PaymentPreimage[:], preImg)
fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000))
fakePathLen := 1 + rand.Intn(5)
fakePath := make([][33]byte, fakePathLen)
for i := 0; i < fakePathLen; i++ {
b, err := randomBytes(33, 34)
if err != nil {
return nil, err
}
copy(fakePath[i][:], b)
}
fakePayment := &outgoingPayment{
Invoice: *fakeInvoice,
Fee: lnwire.MilliSatoshi(rand.Intn(1001)),
Path: fakePath,
TimeLockLength: uint32(rand.Intn(10000)),
}
copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:])
return fakePayment, nil
}
func TestSentPaymentSerialization(t *testing.T) {
t.Parallel()