From dfd1b3864800db7241668ff2f56aa43ec6968678 Mon Sep 17 00:00:00 2001 From: nsa Date: Thu, 15 Aug 2019 20:03:24 -0400 Subject: [PATCH 1/2] tlv: fix panic with large length This commit fixes a panic where a large length in a record could cause the DVarBytes function to fail to allocate a byte slice. --- tlv/stream.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tlv/stream.go b/tlv/stream.go index 49bb70ed..4df70af4 100644 --- a/tlv/stream.go +++ b/tlv/stream.go @@ -6,12 +6,18 @@ import ( "io" "io/ioutil" "math" + + "github.com/lightningnetwork/lnd/lnwire" ) // ErrStreamNotCanonical signals that a decoded stream does not contain records // sorting by monotonically-increasing type. var ErrStreamNotCanonical = errors.New("tlv stream is not canonical") +// ErrRecordTooLarge signals that a decoded record has a length that is too +// long to parse. +var ErrRecordTooLarge = errors.New("record is too large") + // ErrUnknownRequiredType is an error returned when decoding an unknown and even // type from a Stream. type ErrUnknownRequiredType Type @@ -183,6 +189,10 @@ func (s *Stream) Decode(r io.Reader) error { return err } + if length > lnwire.MaxMessagePayload { + return ErrRecordTooLarge + } + // Search the records known to the stream for this type. We'll // begin the search and recordIdx and walk forward until we find // it or the next record's type is larger. 
From 1cc48ceaf5b1800d1d174f1c29799b1bbafd3427 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Wed, 21 Aug 2019 16:30:05 -0700 Subject: [PATCH 2/2] tlv/stream: create MaxRecordSize, remove lnwire import, add test --- tlv/stream.go | 14 +++++++++++--- tlv/tlv_test.go | 9 +++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/tlv/stream.go b/tlv/stream.go index 4df70af4..159301fd 100644 --- a/tlv/stream.go +++ b/tlv/stream.go @@ -6,10 +6,14 @@ import ( "io" "io/ioutil" "math" - - "github.com/lightningnetwork/lnd/lnwire" ) +// MaxRecordSize is the maximum size of a particular record that will be parsed +// by a stream decoder. This value is currently chosen to be equal to the +// maximum message size permitted by BOLT 1, as no record should be bigger than +// an entire message. +const MaxRecordSize = 65535 // 65KB + // ErrStreamNotCanonical signals that a decoded stream does not contain records // sorting by monotonically-increasing type. var ErrStreamNotCanonical = errors.New("tlv stream is not canonical") @@ -189,7 +193,11 @@ func (s *Stream) Decode(r io.Reader) error { return err } - if length > lnwire.MaxMessagePayload { + // Place a soft limit on the size of a sane record, which + // prevents malicious encoders from causing us to allocate an + // unbounded amount of memory when decoding variable-sized + // fields. 
+ if length > MaxRecordSize { return ErrRecordTooLarge } diff --git a/tlv/tlv_test.go b/tlv/tlv_test.go index c3b996fe..13ef2468 100644 --- a/tlv/tlv_test.go +++ b/tlv/tlv_test.go @@ -49,6 +49,8 @@ type N1 struct { nodeAmts nodeAmts cltvDelta uint16 + alias []byte + stream *tlv.Stream } @@ -66,6 +68,7 @@ func NewN1() *N1 { tlv.MakePrimitiveRecord(2, &n.scid), tlv.MakeStaticRecord(3, &n.nodeAmts, 49, ENodeAmts, DNodeAmts), tlv.MakePrimitiveRecord(254, &n.cltvDelta), + tlv.MakePrimitiveRecord(401, &n.alias), ) return n @@ -396,6 +399,12 @@ var tlvDecodingFailureTests = []struct { bytes: []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00}, expErr: tlv.ErrStreamNotCanonical, }, + { + name: "absurd record length", + bytes: []byte{0xfd, 0x01, 0x91, 0xfe, 0xff, 0xff, 0xff, 0xff}, + expErr: tlv.ErrRecordTooLarge, + skipN2: true, + }, } // TestTLVDecodingSuccess asserts that the TLV parser fails to decode invalid