Skip to content

Commit b6bcfe8

Browse files
fix(storage): close HTTP/2 response bodies to prevent flow-control leaks (#14324)
Fixes an issue where HTTP/2 stream connections might exhaust flow-control quota and cause INTERNAL_ERROR in long-lived connections.

- Modifies `httpReader.Read` to properly close the previous stream body before reopening it.
- Adds a defer block in `parseReadResponse` to ensure response bodies aren't leaked when an error is returned during header parsing.
- Ensures bodies are closed when stream discarding (`io.CopyN`) or `X-Goog-Generation` header parsing fails in `readerReopen`.

These fixes apply equally to both the JSON and XML reader code paths.

---------

Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com>
Co-authored-by: krishnamd-jkp <230955344+krishnamd-jkp@users.noreply.github.com>
1 parent 01b7e13 commit b6bcfe8

1 file changed

Lines changed: 18 additions & 6 deletions

File tree

storage/http_client.go

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1424,12 +1424,16 @@ func (r *httpReader) Read(p []byte) (int, error) {
14241424
// Read failed (likely due to connection issues), but we will try to reopen
14251425
// the pipe and continue. Send a ranged read request that takes into account
14261426
// the number of bytes we've already seen.
1427+
1428+
// Close the current body before retrying. Otherwise we leave a
1429+
// connection open and the retry might fail due to quota issues.
1430+
r.body.Close()
1431+
14271432
res, err := r.reopen(r.seen)
14281433
if err != nil {
14291434
// reopen already retries
14301435
return n, err
14311436
}
1432-
r.body.Close()
14331437
r.body = res.Body
14341438
}
14351439
return n, nil
@@ -1523,6 +1527,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
15231527
if params.gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
15241528
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
15251529
if err != nil {
1530+
res.Body.Close()
15261531
return err
15271532
}
15281533
params.gen = gen64
@@ -1536,8 +1541,12 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade
15361541
}
15371542
}
15381543

1539-
func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen func(int64) (*http.Response, error)) (*Reader, error) {
1540-
var err error
1544+
func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen func(int64) (*http.Response, error)) (r *Reader, err error) {
1545+
defer func() {
1546+
if err != nil {
1547+
res.Body.Close()
1548+
}
1549+
}()
15411550
var (
15421551
size int64 // total size of object, even if a range was requested.
15431552
checkCRC bool
@@ -1547,20 +1556,23 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
15471556
if res.StatusCode == http.StatusPartialContent {
15481557
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
15491558
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
1550-
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
1559+
err = fmt.Errorf("storage: invalid Content-Range %q", cr)
1560+
return nil, err
15511561
}
15521562
// Content range is formatted <first byte>-<last byte>/<total size>. We take
15531563
// the total size.
15541564
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
15551565
if err != nil {
1556-
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
1566+
err = fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
1567+
return nil, err
15571568
}
15581569

15591570
dashIndex := strings.Index(cr, "-")
15601571
if dashIndex >= 0 {
15611572
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
15621573
if err != nil {
1563-
return nil, fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
1574+
err = fmt.Errorf("storage: invalid Content-Range %q: %w", cr, err)
1575+
return nil, err
15641576
}
15651577
}
15661578
} else {

0 commit comments

Comments (0)