Merge pull request #3198 from halseth/foreach-non-modify

Avoid modifying bucket during ForEach loop, and Cursor traversal
Olaoluwa Osuntokun 2019-06-14 22:46:23 +02:00 committed by GitHub
commit e65b8e3805
4 changed files with 82 additions and 55 deletions
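
Every hunk in this diff applies the same fix: bbolt documents that the callback passed to Bucket.ForEach must not modify the bucket, and that changing data while a Cursor is traversing can invalidate the cursor, so each mutation is deferred until the traversal has finished. Below is a minimal before/after sketch of that pattern; the helper names are hypothetical and the upstream go.etcd.io/bbolt import path is an assumption (lnd vendors its own copy, so the in-tree path differs).

package sketch // illustrative only, not part of the lnd tree

import (
	bbolt "go.etcd.io/bbolt"
)

// deleteAllKeysUnsafe shows the pattern this PR removes: deleting
// inside ForEach mutates the bucket mid-traversal, which the bbolt
// documentation calls undefined behavior.
func deleteAllKeysUnsafe(b *bbolt.Bucket) error {
	return b.ForEach(func(k, _ []byte) error {
		return b.Delete(k) // unsafe: modifies the bucket being iterated
	})
}

// deleteAllKeys shows the replacement pattern: collect the keys first,
// then delete them once the traversal has finished. The buffered key
// slices stay usable for the rest of the transaction, which is what
// the loops in this PR rely on. (Assumes b holds only plain key/value
// pairs, as the buckets touched here do.)
func deleteAllKeys(b *bbolt.Bucket) error {
	var keys [][]byte
	if err := b.ForEach(func(k, _ []byte) error {
		keys = append(keys, k)
		return nil
	}); err != nil {
		return err
	}
	for _, k := range keys {
		if err := b.Delete(k); err != nil {
			return err
		}
	}
	return nil
}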

@ -2087,10 +2087,6 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary) error {
// information stored within the revocation log.
logBucket := chanBucket.Bucket(revocationLogBucket)
if logBucket != nil {
err := wipeChannelLogEntries(logBucket)
if err != nil {
return err
}
err = chanBucket.DeleteBucket(revocationLogBucket)
if err != nil {
return err
@ -2713,16 +2709,3 @@ func fetchChannelLogEntry(log *bbolt.Bucket,
commitReader := bytes.NewReader(commitBytes)
return deserializeChanCommit(commitReader)
}
func wipeChannelLogEntries(log *bbolt.Bucket) error {
// TODO(roasbeef): comment
logCursor := log.Cursor()
for k, _ := logCursor.First(); k != nil; k, _ = logCursor.Next() {
if err := logCursor.Delete(); err != nil {
return err
}
}
return nil
}
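
Here the per-entry wipe goes away entirely: bbolt's DeleteBucket removes a nested bucket together with every key and sub-bucket stored inside it, so cursoring through the revocation log to delete entries one by one before dropping the bucket was redundant. A hedged sketch of that idea, using a hypothetical helper (dropNestedBucket is not part of this PR) and the imports from the first sketch:

// dropNestedBucket removes the nested bucket stored under key, along
// with every entry inside it, in a single DeleteBucket call. If the
// bucket does not exist there is nothing to do.
func dropNestedBucket(parent *bbolt.Bucket, key []byte) error {
	if parent.Bucket(key) == nil {
		return nil // nothing to delete
	}
	return parent.DeleteBucket(key)
}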

@ -1061,6 +1061,9 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// Scan from chanIDStart to chanIDEnd, deleting every
// found edge.
// NOTE: we must delete the edges after the cursor loop, since
// modifying the bucket while traversing is not safe.
var keys [][]byte
cursor := edgeIndex.Cursor()
for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
@ -1070,6 +1073,12 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
if err != nil {
return err
}
keys = append(keys, k)
removedChans = append(removedChans, &edgeInfo)
}
for _, k := range keys {
err = delChannelEdge(
edges, edgeIndex, chanIndex, zombieIndex, nodes,
k, false,
@ -1077,8 +1086,6 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
if err != nil && err != ErrEdgeNotFound {
return err
}
removedChans = append(removedChans, &edgeInfo)
}
// Delete all the entries in the prune log having a height
@ -1099,10 +1106,18 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
var pruneKeyEnd [4]byte
byteOrder.PutUint32(pruneKeyEnd[:], math.MaxUint32)
// To avoid modifying the bucket while traversing, we delete
// the keys in a second loop.
var pruneKeys [][]byte
pruneCursor := pruneBucket.Cursor()
for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
if err := pruneCursor.Delete(); err != nil {
pruneKeys = append(pruneKeys, k)
}
for _, k := range pruneKeys {
if err := pruneBucket.Delete(k); err != nil {
return err
}
}
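
DisconnectBlockAtHeight now makes two passes over each key range: the cursor pass only records the keys (and deserialized edge infos) that fall inside [chanIDStart, chanIDEnd] or [pruneKeyStart, pruneKeyEnd], and the deletions run in a plain loop afterwards. The same two-pass range delete, sketched as a hypothetical standalone helper (assumes the bytes and bbolt imports from the first sketch):

// deleteRange scans the inclusive key range [start, end] with a
// cursor, remembers the matching keys, and deletes them only after the
// traversal has finished, so the bucket is never modified while the
// cursor is positioned on it.
func deleteRange(b *bbolt.Bucket, start, end []byte) error {
	var keys [][]byte
	c := b.Cursor()
	for k, _ := c.Seek(start); k != nil && bytes.Compare(k, end) <= 0; k, _ = c.Next() {
		keys = append(keys, k)
	}
	for _, k := range keys {
		if err := b.Delete(k); err != nil {
			return err
		}
	}
	return nil
}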

@ -146,6 +146,11 @@ func migrateInvoiceTimeSeries(tx *bbolt.Tx) error {
// Now that we have all the buckets we need, we'll run through each
// invoice in the database, and update it to reflect the new format
// expected post migration.
// NOTE: we store the converted invoices and put them back into the
// database after the loop, since modifying the bucket within the
// ForEach loop is not safe.
var invoicesKeys [][]byte
var invoicesValues [][]byte
err = invoices.ForEach(func(invoiceNum, invoiceBytes []byte) error {
// If this is a sub bucket, then we'll skip it.
if invoiceBytes == nil {
@ -226,12 +231,25 @@ func migrateInvoiceTimeSeries(tx *bbolt.Tx) error {
return err
}
return invoices.Put(invoiceNum, b.Bytes())
// Save the key and value pending update for after the ForEach
// is done.
invoicesKeys = append(invoicesKeys, invoiceNum)
invoicesValues = append(invoicesValues, b.Bytes())
return nil
})
if err != nil {
return err
}
// Now put the converted invoices into the DB.
for i := range invoicesKeys {
key := invoicesKeys[i]
value := invoicesValues[i]
if err := invoices.Put(key, value); err != nil {
return err
}
}
log.Infof("Migration to invoice time series index complete!")
return nil
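
The invoice migration keeps its ForEach callback read-only: it buffers each invoice key alongside the re-serialized value, and only issues the Puts once ForEach has returned. The payment and close-summary migrations below follow the same shape. A hedged generic sketch of that pattern (rewriteAll and its update callback are hypothetical, not part of the PR):

// rewriteAll transforms every plain value in the bucket with update,
// but defers the writes until the ForEach traversal is done, mirroring
// the buffered keys/values loops used by these migrations.
func rewriteAll(b *bbolt.Bucket, update func(k, v []byte) ([]byte, error)) error {
	var keys, values [][]byte
	err := b.ForEach(func(k, v []byte) error {
		if v == nil {
			// Skip nested buckets.
			return nil
		}
		newVal, err := update(k, v)
		if err != nil {
			return err
		}
		keys = append(keys, k)
		values = append(values, newVal)
		return nil
	})
	if err != nil {
		return err
	}
	for i, k := range keys {
		if err := b.Put(k, values[i]); err != nil {
			return err
		}
	}
	return nil
}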
@ -249,6 +267,10 @@ func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
log.Infof("Migrating invoice database to new outgoing payment format")
// We store the keys and values we want to modify since it is not safe
// to modify them directly within the ForEach loop.
var paymentKeys [][]byte
var paymentValues [][]byte
err := payBucket.ForEach(func(payID, paymentBytes []byte) error {
log.Tracef("Migrating payment %x", payID[:])
@ -290,17 +312,25 @@ func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
}
// Now that we know the modifications were successful, we'll
// write it back to disk in the new format.
if err := payBucket.Put(payID, paymentCopy); err != nil {
return err
}
// store it to our slice of keys and values, and write it back
// to disk in the new format after the ForEach loop is over.
paymentKeys = append(paymentKeys, payID)
paymentValues = append(paymentValues, paymentCopy)
return nil
})
if err != nil {
return err
}
// Finally store the updated payments to the bucket.
for i := range paymentKeys {
key := paymentKeys[i]
value := paymentValues[i]
if err := payBucket.Put(key, value); err != nil {
return err
}
}
log.Infof("Migration to outgoing payment invoices complete!")
return nil
@ -587,6 +617,12 @@ func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
}
log.Info("Migrating to new closed channel format...")
// We store the converted keys and values and put them back into the
// database after the loop, since modifying the bucket within the
// ForEach loop is not safe.
var closedChansKeys [][]byte
var closedChansValues [][]byte
err := closedChanBucket.ForEach(func(chanID, summary []byte) error {
r := bytes.NewReader(summary)
@ -603,12 +639,26 @@ func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
return err
}
return closedChanBucket.Put(chanID, b.Bytes())
// Now that we know the modifications were successful, we'll
// store the key and value to our slices, and write it back to
// disk in the new format after the ForEach loop is over.
closedChansKeys = append(closedChansKeys, chanID)
closedChansValues = append(closedChansValues, b.Bytes())
return nil
})
if err != nil {
return fmt.Errorf("unable to update closed channels: %v", err)
}
// Now put the new format back into the DB.
for i := range closedChansKeys {
key := closedChansKeys[i]
value := closedChansValues[i]
if err := closedChanBucket.Put(key, value); err != nil {
return err
}
}
log.Info("Migration to new closed channel format complete!")
return nil

@ -853,20 +853,9 @@ func (b *boltArbitratorLog) WipeHistory() error {
}
// Next, we'll delete any lingering contract state within the
// contracts bucket, and the bucket itself once we're done
// clearing it out.
contractBucket, err := scopeBucket.CreateBucketIfNotExists(
contractsBucketKey,
)
if err != nil {
return err
}
if err := contractBucket.ForEach(func(resKey, _ []byte) error {
return contractBucket.Delete(resKey)
}); err != nil {
return err
}
if err := scopeBucket.DeleteBucket(contractsBucketKey); err != nil {
// contracts bucket by removing the bucket itself.
err = scopeBucket.DeleteBucket(contractsBucketKey)
if err != nil && err != bbolt.ErrBucketNotFound {
return err
}
@ -876,20 +865,10 @@ func (b *boltArbitratorLog) WipeHistory() error {
return err
}
// Before we delete the enclosing bucket itself, we'll delete any
// chain actions that are still stored.
actionsBucket, err := scopeBucket.CreateBucketIfNotExists(
actionsBucketKey,
)
if err != nil {
return err
}
if err := actionsBucket.ForEach(func(resKey, _ []byte) error {
return actionsBucket.Delete(resKey)
}); err != nil {
return err
}
if err := scopeBucket.DeleteBucket(actionsBucketKey); err != nil {
// We'll delete any chain actions that are still stored by
// removing the enclosing bucket.
err = scopeBucket.DeleteBucket(actionsBucketKey)
if err != nil && err != bbolt.ErrBucketNotFound {
return err
}
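
WipeHistory no longer creates a bucket just to empty it with a ForEach-plus-Delete pass before removing it; a single DeleteBucket call drops the bucket and its contents, and bbolt.ErrBucketNotFound is treated as "already wiped". A small sketch of that simplification, again with a hypothetical helper name and the imports from the first sketch:

// removeBucketIfExists deletes the nested bucket under key, treating a
// missing bucket as success rather than pre-creating it only to wipe
// and delete it again.
func removeBucketIfExists(parent *bbolt.Bucket, key []byte) error {
	err := parent.DeleteBucket(key)
	if err != nil && err != bbolt.ErrBucketNotFound {
		return err
	}
	return nil
}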