Skip to content

Commit d4341da

Browse files
committed
test: parametrize test_mrd_concurrent_download for different chunk sizes
1 parent b5974f5 commit d4341da

File tree

1 file changed

+26
-22
lines changed

1 file changed

+26
-22
lines changed

packages/google-cloud-storage/tests/system/test_zonal.py

Lines changed: 26 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -597,21 +597,37 @@ async def _run():
597597
event_loop.run_until_complete(_run())
598598

599599

600+
@pytest.mark.parametrize(
601+
"ranges_desc, chunk_ranges",
602+
[
603+
("small", [(1, 100)] * 3),
604+
("medium", [(100, 100000)] * 3),
605+
("large", [(1000000, 2000000)] * 3),
606+
("mixed", [(1, 100), (100, 100000), (1000000, 2000000)]),
607+
],
608+
)
600609
def test_mrd_concurrent_download(
601-
storage_client, blobs_to_delete, event_loop, grpc_client_direct
610+
storage_client,
611+
blobs_to_delete,
612+
event_loop,
613+
grpc_client_direct,
614+
ranges_desc,
615+
chunk_ranges,
602616
):
603617
"""
604618
Test that mrd can handle concurrent `download_ranges` calls correctly.
605-
Tests overlapping ranges, high concurrency (len > 100 multiplexing batch limits),
606-
mixed random chunk sizes (small/medium/large), and full object fetching alongside specific chunks.
619+
Tests overlapping ranges, minimal concurrency,
620+
parametrized chunk sizes (small/medium/large/mixed), and full object fetching alongside specific chunks.
607621
"""
608622
object_size = 15 * 1024 * 1024 # 15MB
609623
object_name = f"test_mrd_concurrent-{uuid.uuid4()}"
610624

611625
async def _run():
612626
object_data = os.urandom(object_size)
613627

614-
writer = AsyncAppendableObjectWriter(grpc_client_direct, _ZONAL_BUCKET, object_name)
628+
writer = AsyncAppendableObjectWriter(
629+
grpc_client_direct, _ZONAL_BUCKET, object_name
630+
)
615631
await writer.open()
616632
await writer.append(object_data)
617633
await writer.close(finalize_on_close=True)
@@ -622,28 +638,14 @@ async def _run():
622638
tasks = []
623639
ranges_to_fetch = []
624640

625-
# Overlapping ranges & Mixed random chunk sizes
626-
# Small chunks
627-
for _ in range(60):
628-
start = random.randint(0, object_size - 100)
629-
length = random.randint(1, 100)
630-
ranges_to_fetch.append((start, length))
631-
# Medium chunks
632-
for _ in range(60):
633-
start = random.randint(0, object_size - 100000)
634-
length = random.randint(100, 100000)
635-
ranges_to_fetch.append((start, length))
636-
# Large chunks
637-
for _ in range(5):
638-
start = random.randint(0, object_size - 2000000)
639-
length = random.randint(1000000, 2000000)
641+
for min_len, max_len in chunk_ranges:
642+
start = random.randint(0, object_size - max_len)
643+
length = random.randint(min_len, max_len)
640644
ranges_to_fetch.append((start, length))
641645

642646
# Full object fetching concurrently
643647
ranges_to_fetch.append((0, 0))
644648

645-
# High concurrency batching (Total > 100 ranges)
646-
assert len(ranges_to_fetch) > 100
647649
random.shuffle(ranges_to_fetch)
648650

649651
buffers = [BytesIO() for _ in range(len(ranges_to_fetch))]
@@ -667,7 +669,9 @@ async def _run():
667669

668670
del writer
669671
gc.collect()
670-
blobs_to_delete.append(storage_client.bucket(_ZONAL_BUCKET).blob(object_name))
672+
blobs_to_delete.append(
673+
storage_client.bucket(_ZONAL_BUCKET).blob(object_name)
674+
)
671675

672676
event_loop.run_until_complete(_run())
673677

0 commit comments

Comments (0)