Skip to content

Commit 8379d7d

Browse files
authored
security: limit decompressed gzip output in ParseProtoReader and OTLP ingestion path (#7515)
Wrap gzip.Reader with io.LimitReader(maxSize+1) before reading decompressed bytes in both pkg/util/http.go and pkg/util/push/otlp.go. Signed-off-by: Daniel Blando <ddeluigg@amazon.com>
1 parent af9c40c commit 8379d7d

4 files changed

Lines changed: 30 additions & 4 deletions

File tree

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
* [BUGFIX] Memberlist: Drop incoming TCP transport packets when digest verification fails, preventing corrupted payloads from being forwarded. #7474
3636
* [BUGFIX] Compactor: Fix stale `cortex_bucket_index_last_successful_update_timestamp_seconds` metric not being cleaned up when tenant ownership changes due to ring rebalancing. This caused false alarms on bucket index update rate when a tenant moved between compactors. #7485
3737
* [BUGFIX] Security: Fix stored XSS vulnerability in Alertmanager and Store Gateway status pages by replacing `text/template` with `html/template`. #7512
38+
* [BUGFIX] Security: Limit decompressed gzip output in `ParseProtoReader` and OTLP ingestion path. The decompressed body is now capped by `-distributor.otlp-max-recv-msg-size`. #7515
3839

3940
## 1.21.0 2026-04-24
4041

pkg/util/http.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -220,11 +220,11 @@ func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compressi
220220
}
221221
body, err = decompressFromBuffer(&buf, maxSize, RawSnappy, sp)
222222
case Gzip:
223-
reader, err = gzip.NewReader(reader)
224-
if err != nil {
225-
return nil, err
223+
gzReader, gzErr := gzip.NewReader(reader)
224+
if gzErr != nil {
225+
return nil, gzErr
226226
}
227-
_, err = buf.ReadFrom(reader)
227+
_, err = buf.ReadFrom(io.LimitReader(gzReader, int64(maxSize)+1))
228228
body = buf.Bytes()
229229
}
230230
return body, err

pkg/util/http_test.go

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ package util_test
22

33
import (
44
"bytes"
5+
"compress/gzip"
56
"context"
67
"html/template"
78
"io"
@@ -220,3 +221,26 @@ func TestIsRequestBodyTooLargeRegression(t *testing.T) {
220221
_, err := io.ReadAll(http.MaxBytesReader(httptest.NewRecorder(), io.NopCloser(bytes.NewReader([]byte{1, 2, 3, 4})), 1))
221222
assert.True(t, util.IsRequestBodyTooLarge(err))
222223
}
224+
225+
func TestParseProtoReader_GzipDecompressionBomb(t *testing.T) {
226+
// Create a gzip payload where decompressed size far exceeds maxSize.
227+
const maxSize = 4096 // 4 KB limit on decompressed output
228+
uncompressed := make([]byte, 1<<20) // 1 MB of zeros
229+
230+
var compressed bytes.Buffer
231+
gzw := gzip.NewWriter(&compressed)
232+
_, err := gzw.Write(uncompressed)
233+
require.NoError(t, err)
234+
require.NoError(t, gzw.Close())
235+
236+
// The compressed payload is small enough to pass the compressed-size limit,
237+
// but decompresses to far more than maxSize.
238+
require.Less(t, compressed.Len(), maxSize)
239+
240+
var fromWire cortexpb.PreallocWriteRequest
241+
err = util.ParseProtoReader(context.Background(), io.NopCloser(&compressed), 0, maxSize, &fromWire, util.Gzip)
242+
// The decompressed output should be limited to maxSize+1 bytes, causing a
243+
// proto unmarshal error (not an OOM). The key assertion is that we don't
244+
// allocate 1 MB of memory.
245+
assert.NotNil(t, err)
246+
}

pkg/util/push/otlp.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -160,6 +160,7 @@ func decodeOTLPWriteRequest(ctx context.Context, r *http.Request, maxSize int) (
160160
if err != nil {
161161
return req, err
162162
}
163+
reader = io.LimitReader(reader, int64(maxSize)+1)
163164
}
164165

165166
var buf bytes.Buffer

0 commit comments

Comments (0)