Conner Fromknecht
5 years ago
12 changed files with 414 additions and 200 deletions
@ -0,0 +1,44 @@
|
||||
package buffer_test |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
|
||||
"github.com/lightningnetwork/lnd/buffer" |
||||
) |
||||
|
||||
// TestRecycleSlice asserts that RecycleSlice always zeros a byte slice.
|
||||
func TestRecycleSlice(t *testing.T) { |
||||
tests := []struct { |
||||
name string |
||||
slice []byte |
||||
}{ |
||||
{ |
||||
name: "length zero", |
||||
}, |
||||
{ |
||||
name: "length one", |
||||
slice: []byte("a"), |
||||
}, |
||||
{ |
||||
name: "length power of two length", |
||||
slice: bytes.Repeat([]byte("b"), 16), |
||||
}, |
||||
{ |
||||
name: "length non power of two", |
||||
slice: bytes.Repeat([]byte("c"), 27), |
||||
}, |
||||
} |
||||
|
||||
for _, test := range tests { |
||||
t.Run(test.name, func(t *testing.T) { |
||||
buffer.RecycleSlice(test.slice) |
||||
|
||||
expSlice := make([]byte, len(test.slice)) |
||||
if !bytes.Equal(expSlice, test.slice) { |
||||
t.Fatalf("slice not recycled, want: %v, got: %v", |
||||
expSlice, test.slice) |
||||
} |
||||
}) |
||||
} |
||||
} |
@ -0,0 +1,17 @@
|
||||
package buffer |
||||
|
||||
// RecycleSlice zeroes byte slice, making it fresh for another use.
|
||||
// Zeroing the buffer using a logarithmic number of calls to the optimized copy
|
||||
// method. Benchmarking shows this to be ~30 times faster than a for loop that
|
||||
// sets each index to 0 for ~65KB buffers use for wire messages. Inspired by:
|
||||
// https://stackoverflow.com/questions/30614165/is-there-analog-of-memset-in-go
|
||||
func RecycleSlice(b []byte) { |
||||
if len(b) == 0 { |
||||
return |
||||
} |
||||
|
||||
b[0] = 0 |
||||
for i := 1; i < len(b); i *= 2 { |
||||
copy(b[i:], b[:i]) |
||||
} |
||||
} |
@ -0,0 +1,19 @@
|
||||
package buffer |
||||
|
||||
import ( |
||||
"github.com/lightningnetwork/lnd/lnwire" |
||||
) |
||||
|
||||
// WriteSize represents the size of the maximum plaintext message that can be
// sent using brontide. The buffer does not include extra space for the MAC,
// as that is applied by the Noise protocol after encrypting the plaintext.
const WriteSize = lnwire.MaxMessagePayload

// Write is a static byte array occupying the maximum-allowed
// plaintext-message size.
type Write [WriteSize]byte

// Recycle zeroes the Write, making it fresh for another use.
func (b *Write) Recycle() {
	RecycleSlice(b[:])
}
@ -1,79 +0,0 @@
|
||||
package lnpeer |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/lightningnetwork/lnd/lnwire" |
||||
"github.com/lightningnetwork/lnd/queue" |
||||
) |
||||
|
||||
const (
	// DefaultGCInterval is the default interval that the WriteBufferPool
	// will perform a sweep to see which expired buffers can be released to
	// the runtime.
	DefaultGCInterval = 15 * time.Second

	// DefaultExpiryInterval is the default, minimum interval that must
	// elapse before a WriteBuffer will be released. The maximum time
	// before the buffer can be released is equal to the expiry interval
	// plus the gc interval.
	DefaultExpiryInterval = 30 * time.Second
)
||||
|
||||
// WriteBuffer is a static byte array occupying the maximum-allowed
// plaintext-message size.
type WriteBuffer [lnwire.MaxMessagePayload]byte

// Recycle zeroes the WriteBuffer, making it fresh for another use.
//
// The buffer is zeroed using a logarithmic number of calls to the optimized
// copy method. Benchmarking shows this to be ~30 times faster than a for
// loop that sets each index to 0 for this buffer size. Inspired by:
// https://stackoverflow.com/questions/30614165/is-there-analog-of-memset-in-go
//
// This is part of the queue.Recycler interface.
func (b *WriteBuffer) Recycle() {
	// Zero the first byte, then repeatedly double the zeroed prefix by
	// copying it onto the remainder of the buffer.
	b[0] = 0
	for i := 1; i < lnwire.MaxMessagePayload; i *= 2 {
		copy(b[i:], b[:i])
	}
}

// newRecyclableWriteBuffer is a constructor that returns a WriteBuffer typed
// as a queue.Recycler.
func newRecyclableWriteBuffer() queue.Recycler {
	return new(WriteBuffer)
}

// A compile-time constraint to ensure that *WriteBuffer implements the
// queue.Recycler interface.
var _ queue.Recycler = (*WriteBuffer)(nil)
||||
|
||||
// WriteBufferPool acts as a global pool of WriteBuffers, that dynamically
// allocates and reclaims buffers in response to load.
type WriteBufferPool struct {
	pool *queue.GCQueue
}

// NewWriteBufferPool returns a freshly instantiated WriteBufferPool, using
// the given gcInterval and expiryInterval. gcInterval controls how often the
// underlying queue sweeps for expired buffers, and expiryInterval is the
// minimum time a buffer must sit idle before being released.
func NewWriteBufferPool(
	gcInterval, expiryInterval time.Duration) *WriteBufferPool {

	return &WriteBufferPool{
		pool: queue.NewGCQueue(
			newRecyclableWriteBuffer, 100,
			gcInterval, expiryInterval,
		),
	}
}

// Take returns a fresh WriteBuffer to the caller.
func (p *WriteBufferPool) Take() *WriteBuffer {
	return p.pool.Take().(*WriteBuffer)
}

// Return returns the WriteBuffer to the pool, so that it can be recycled or
// released.
func (p *WriteBufferPool) Return(buf *WriteBuffer) {
	p.pool.Return(buf)
}
@ -1,67 +0,0 @@
|
||||
package lnpeer_test |
||||
|
||||
import ( |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/lightningnetwork/lnd/lnpeer" |
||||
) |
||||
|
||||
// TestWriteBufferPool verifies that the buffer pool properly resets used
// write buffers.
func TestWriteBufferPool(t *testing.T) {
	const (
		gcInterval     = time.Second
		expiryInterval = 250 * time.Millisecond
	)

	bp := lnpeer.NewWriteBufferPool(gcInterval, expiryInterval)

	// Take a fresh write buffer from the pool.
	writeBuf := bp.Take()

	// Dirty the write buffer.
	for i := range writeBuf[:] {
		writeBuf[i] = 0xff
	}

	// Return the buffer to the pool.
	bp.Return(writeBuf)

	// Take buffers from the pool until we find the original. We expect at
	// most two, in the event that a fresh buffer is populated after the
	// first is taken.
	for i := 0; i < 2; i++ {
		// Wait a small duration to ensure the tests behave reliably,
		// and don't activate the non-blocking case unintentionally.
		<-time.After(time.Millisecond)

		// Take a buffer, skipping those whose pointer does not match
		// the one we dirtied.
		writeBuf2 := bp.Take()
		if writeBuf2 != writeBuf {
			continue
		}

		// Finally, verify that the buffer has been properly cleaned.
		for i := range writeBuf2[:] {
			if writeBuf2[i] != 0 {
				t.Fatalf("buffer was not recycled")
			}
		}

		return
	}

	t.Fatalf("original buffer not found")
}
||||
|
||||
// BenchmarkWriteBufferRecycle tests how quickly a WriteBuffer can be zeroed.
|
||||
func BenchmarkWriteBufferRecycle(b *testing.B) { |
||||
b.ReportAllocs() |
||||
|
||||
buffer := new(lnpeer.WriteBuffer) |
||||
for i := 0; i < b.N; i++ { |
||||
buffer.Recycle() |
||||
} |
||||
} |
@ -0,0 +1,52 @@
|
||||
package pool |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/lightningnetwork/lnd/queue" |
||||
) |
||||
|
||||
// Recycler is an interface that allows an object to be reclaimed without
// needing to be returned to the runtime.
type Recycler interface {
	// Recycle resets the object to its default state.
	Recycle()
}
||||
|
||||
// Recycle is a generic queue for recycling objects implementing the Recycler
// interface. It is backed by an underlying queue.GCQueue, and invokes the
// Recycle method on returned objects before returning them to the queue.
type Recycle struct {
	queue *queue.GCQueue
}

// NewRecycle initializes a fresh Recycle instance. newItem constructs a new
// item when the queue has none available; the remaining parameters are
// forwarded directly to the underlying queue.NewGCQueue.
func NewRecycle(newItem func() interface{}, returnQueueSize int,
	gcInterval, expiryInterval time.Duration) *Recycle {

	return &Recycle{
		queue: queue.NewGCQueue(
			newItem, returnQueueSize,
			gcInterval, expiryInterval,
		),
	}
}

// Take returns an element from the pool.
func (r *Recycle) Take() interface{} {
	return r.queue.Take()
}

// Return returns an item implementing the Recycler interface to the pool.
// The Recycle method is invoked before returning the item to improve
// performance and utilization under load.
func (r *Recycle) Return(item Recycler) {
	// Recycle the item to ensure that a dirty instance is never offered
	// from Take. The call is done here so that the CPU cycles spent
	// clearing the buffer are owned by the caller, and not by the queue
	// itself. This makes the queue more likely to be available to deliver
	// items in the free list.
	item.Recycle()

	r.queue.Return(item)
}
@ -0,0 +1,193 @@
|
||||
package pool_test |
||||
|
||||
import ( |
||||
"bytes" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/lightningnetwork/lnd/buffer" |
||||
"github.com/lightningnetwork/lnd/pool" |
||||
) |
||||
|
||||
// mockRecycler is a minimal Recycler implementation whose dirty state is the
// boolean value itself: true means dirty, false means clean.
type mockRecycler bool

// Recycle resets the mockRecycler to its clean (false) state.
func (m *mockRecycler) Recycle() {
	*m = false
}
||||
|
||||
// TestRecyclers verifies that known recyclable types properly return to
// their zero-value after invoking Recycle.
func TestRecyclers(t *testing.T) {
	tests := []struct {
		name    string
		newItem func() interface{}
	}{
		{
			"mock recycler",
			func() interface{} { return new(mockRecycler) },
		},
		{
			"write_buffer",
			func() interface{} { return new(buffer.Write) },
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Initialize the Recycler to test.
			r := test.newItem().(pool.Recycler)

			// Dirty the item.
			dirtyGeneric(t, r)

			// Invoke Recycle to clear the item.
			r.Recycle()

			// Assert the item is now clean.
			isCleanGeneric(t, r)
		})
	}
}
||||
|
||||
// recyclePoolTest describes a single pool implementation under test: a
// human-readable name and a constructor returning the pool instance.
type recyclePoolTest struct {
	name    string
	newPool func() interface{}
}
||||
|
||||
// TestConcreteRecyclePoolTests generically tests that pools derived from the
// base Recycle pool are properly configured.
func TestConcreteRecyclePoolTests(t *testing.T) {
	const (
		gcInterval     = time.Second
		expiryInterval = 250 * time.Millisecond
	)

	tests := []recyclePoolTest{
		{
			name: "write buffer pool",
			newPool: func() interface{} {
				return pool.NewWriteBuffer(
					gcInterval, expiryInterval,
				)
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			testRecyclePool(t, test)
		})
	}
}
||||
|
||||
// testRecyclePool exercises a single pool: it dirties an item taken from the
// pool, returns it, and asserts that when the same item is taken again it
// has been recycled back to its zero value.
func testRecyclePool(t *testing.T, test recyclePoolTest) {
	p := test.newPool()

	// Take an item from the pool.
	r1 := takeGeneric(t, p)

	// Dirty the item.
	dirtyGeneric(t, r1)

	// Return the item to the pool.
	returnGeneric(t, p, r1)

	// Take items from the pool until we find the original. We expect at
	// most two, in the event that a fresh item is populated after the
	// first is taken.
	for i := 0; i < 2; i++ {
		// Wait a small duration to ensure the tests are reliable, and
		// don't activate the non-blocking case unintentionally.
		<-time.After(time.Millisecond)

		r2 := takeGeneric(t, p)

		// Take an item, skipping those whose pointer does not match
		// the one we dirtied.
		if r1 != r2 {
			continue
		}

		// Finally, verify that the item has been properly cleaned.
		isCleanGeneric(t, r2)

		return
	}

	t.Fatalf("original item not found")
}
||||
|
||||
// takeGeneric takes an item from the given pool, dispatching on the pool's
// concrete type. Unknown pool types fail the test.
func takeGeneric(t *testing.T, p interface{}) pool.Recycler {
	t.Helper()

	switch pp := p.(type) {
	case *pool.WriteBuffer:
		return pp.Take()

	default:
		t.Fatalf("unknown pool type: %T", p)
	}

	// Unreachable: Fatalf above aborts the test, but the compiler still
	// requires a return on this path.
	return nil
}
||||
|
||||
// returnGeneric returns an item to the given pool, dispatching on the pool's
// concrete type. Unknown pool types fail the test.
func returnGeneric(t *testing.T, p, item interface{}) {
	t.Helper()

	switch pp := p.(type) {
	case *pool.WriteBuffer:
		pp.Return(item.(*buffer.Write))

	default:
		t.Fatalf("unknown pool type: %T", p)
	}
}
||||
|
||||
func dirtyGeneric(t *testing.T, i interface{}) { |
||||
t.Helper() |
||||
|
||||
switch item := i.(type) { |
||||
case *mockRecycler: |
||||
*item = true |
||||
|
||||
case *buffer.Write: |
||||
dirtySlice(item[:]) |
||||
|
||||
default: |
||||
t.Fatalf("unknown item type: %T", i) |
||||
} |
||||
|
||||
} |
||||
|
||||
// dirtySlice sets every byte of the slice to the non-zero value 0xff.
func dirtySlice(slice []byte) {
	for i := 0; i < len(slice); i++ {
		slice[i] = 0xff
	}
}
||||
|
||||
func isCleanGeneric(t *testing.T, i interface{}) { |
||||
t.Helper() |
||||
|
||||
switch item := i.(type) { |
||||
case *mockRecycler: |
||||
if isDirty := *item; isDirty { |
||||
t.Fatalf("mock recycler still diry") |
||||
} |
||||
|
||||
case *buffer.Write: |
||||
isCleanSlice(t, item[:]) |
||||
|
||||
default: |
||||
t.Fatalf("unknown item type: %T", i) |
||||
} |
||||
} |
||||
|
||||
// isCleanSlice fails the test unless every byte of slice is zero.
func isCleanSlice(t *testing.T, slice []byte) {
	t.Helper()

	// A freshly allocated slice of equal length is all zeros, so a clean
	// slice compares equal to it.
	want := make([]byte, len(slice))
	if bytes.Equal(want, slice) {
		return
	}

	t.Fatalf("slice not recycled, want: %v, got: %v",
		want, slice)
}
@ -0,0 +1,48 @@
|
||||
package pool |
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/lightningnetwork/lnd/buffer" |
||||
) |
||||
|
||||
const (
	// DefaultWriteBufferGCInterval is the default interval that a
	// WriteBuffer pool will perform a sweep to see which expired
	// buffer.Writes can be released to the runtime.
	DefaultWriteBufferGCInterval = 15 * time.Second

	// DefaultWriteBufferExpiryInterval is the default, minimum interval
	// that must elapse before a WriteBuffer pool will release a
	// buffer.Write. The maximum time before the buffer can be released is
	// equal to the expiry interval plus the gc interval.
	DefaultWriteBufferExpiryInterval = 30 * time.Second
)
||||
|
||||
// WriteBuffer is a pool of recycled buffer.Write items, that dynamically
// allocates and reclaims buffers in response to load.
type WriteBuffer struct {
	pool *Recycle
}

// NewWriteBuffer returns a freshly instantiated WriteBuffer, using the given
// gcInterval and expiryInterval.
func NewWriteBuffer(gcInterval, expiryInterval time.Duration) *WriteBuffer {
	return &WriteBuffer{
		pool: NewRecycle(
			func() interface{} { return new(buffer.Write) },
			100, gcInterval, expiryInterval,
		),
	}
}

// Take returns a fresh buffer.Write to the caller.
func (p *WriteBuffer) Take() *buffer.Write {
	return p.pool.Take().(*buffer.Write)
}

// Return returns the buffer.Write to the pool, so that it can be recycled or
// released.
func (p *WriteBuffer) Return(buf *buffer.Write) {
	p.pool.Return(buf)
}
Loading…
Reference in new issue