Skip to content

Commit ae7e19a

Browse files
committed
chore: additional linting updates
1 parent 711321b commit ae7e19a

File tree

4 files changed

+40
-27
lines changed

4 files changed

+40
-27
lines changed

packages/google-cloud-storage/google/cloud/storage/asyncio/async_multi_range_downloader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -230,7 +230,6 @@ def __init__(
230230
self.persisted_size: Optional[int] = None # updated after opening the stream
231231
self._open_retries: int = 0
232232

233-
234233
async def __aenter__(self):
235234
"""Opens the underlying bidi-gRPC connection to read from the object."""
236235
await self.open()
@@ -259,6 +258,7 @@ async def open(
259258
raise ValueError("Underlying bidi-gRPC stream is already open")
260259

261260
if retry_policy is None:
261+
262262
def on_error_wrapper(exc):
263263
self._open_retries += 1
264264
self._on_open_error(exc)

packages/google-cloud-storage/tests/perf/microbenchmarks/_utils.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,8 @@
1818
import socket
1919
import psutil
2020

21-
_C4_STANDARD_192_NIC = "ens3" # can be fetched via ip link show
21+
_C4_STANDARD_192_NIC = "ens3" # can be fetched via ip link show
22+
2223

2324
def publish_benchmark_extra_info(
2425
benchmark: Any,
@@ -28,7 +29,6 @@ def publish_benchmark_extra_info(
2829
download_bytes_list: Optional[List[int]] = None,
2930
duration: Optional[int] = None,
3031
) -> None:
31-
3232
"""
3333
Helper function to publish benchmark parameters to the extra_info property.
3434
"""
@@ -48,14 +48,15 @@ def publish_benchmark_extra_info(
4848
benchmark.group = benchmark_group
4949

5050
if download_bytes_list is not None:
51-
assert duration is not None, "Duration must be provided if total_bytes_transferred is provided."
51+
assert (
52+
duration is not None
53+
), "Duration must be provided if total_bytes_transferred is provided."
5254
throughputs_list = [x / duration / (1024 * 1024) for x in download_bytes_list]
5355
min_throughput = min(throughputs_list)
5456
max_throughput = max(throughputs_list)
5557
mean_throughput = statistics.mean(throughputs_list)
5658
median_throughput = statistics.median(throughputs_list)
5759

58-
5960
else:
6061
object_size = params.file_size_bytes
6162
num_files = params.num_files
@@ -211,13 +212,13 @@ def get_affinity(irq):
211212

212213
def get_primary_interface_name():
213214
primary_ip = None
214-
215+
215216
# 1. Determine the Local IP used for internet access
216217
# We use UDP (SOCK_DGRAM) so we don't actually send a handshake/packet
217218
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
218219
try:
219220
# connect() to a public IP (Google DNS) to force route resolution
220-
s.connect(('8.8.8.8', 80))
221+
s.connect(("8.8.8.8", 80))
221222
primary_ip = s.getsockname()[0]
222223
except Exception:
223224
# Fallback if no internet
@@ -248,7 +249,7 @@ def get_irq_affinity():
248249
for irq in irqs:
249250
affinity_str = get_affinity(irq)
250251
if affinity_str != "N/A":
251-
for part in affinity_str.split(','):
252-
if '-' not in part:
252+
for part in affinity_str.split(","):
253+
if "-" not in part:
253254
cpus.add(int(part))
254255
return cpus

packages/google-cloud-storage/tests/system/test_zonal.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -104,7 +104,9 @@ async def _run():
104104
object_data = os.urandom(object_size)
105105
object_checksum = google_crc32c.value(object_data)
106106

107-
writer = AsyncAppendableObjectWriter(grpc_client, _CROSS_REGION_BUCKET, object_name)
107+
writer = AsyncAppendableObjectWriter(
108+
grpc_client, _CROSS_REGION_BUCKET, object_name
109+
)
108110
await writer.open()
109111
await writer.append(object_data)
110112
object_metadata = await writer.close(finalize_on_close=True)
@@ -122,12 +124,15 @@ async def _run():
122124
assert buffer.getvalue() == object_data
123125

124126
# Clean up; use json client (i.e. `storage_client` fixture) to delete.
125-
blobs_to_delete.append(storage_client.bucket(_CROSS_REGION_BUCKET).blob(object_name))
127+
blobs_to_delete.append(
128+
storage_client.bucket(_CROSS_REGION_BUCKET).blob(object_name)
129+
)
126130
del writer
127131
gc.collect()
128132

129133
event_loop.run_until_complete(_run())
130134

135+
131136
@pytest.mark.parametrize(
132137
"object_size",
133138
[

packages/google-cloud-storage/tests/unit/asyncio/test_async_appendable_object_writer.py

Lines changed: 23 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -175,9 +175,9 @@ async def test_state_lookup(self, mock_appendable_writer):
175175
writer._is_stream_open = True
176176
writer.write_obj_stream = mock_appendable_writer["mock_stream"]
177177

178-
mock_appendable_writer["mock_stream"].recv.return_value = (
179-
storage_type.BidiWriteObjectResponse(persisted_size=100)
180-
)
178+
mock_appendable_writer[
179+
"mock_stream"
180+
].recv.return_value = storage_type.BidiWriteObjectResponse(persisted_size=100)
181181

182182
size = await writer.state_lookup()
183183

@@ -246,9 +246,7 @@ async def test_append_data_less_than_flush_interval(self, mock_appendable_writer
246246
],
247247
)
248248
@pytest.mark.asyncio
249-
async def test_append(
250-
self, data_len, mock_appendable_writer
251-
):
249+
async def test_append(self, data_len, mock_appendable_writer):
252250
"""Verify append orchestrates manager and drives the internal generator."""
253251
# Arrange
254252
writer = self._make_one(mock_appendable_writer["mock_client"])
@@ -272,10 +270,19 @@ async def test_append(
272270
# Assert
273271
expected_recv_count = data_len // _DEFAULT_FLUSH_INTERVAL_BYTES
274272
assert writer.offset == data_len
275-
assert writer.bytes_appended_since_last_flush == data_len % _DEFAULT_FLUSH_INTERVAL_BYTES
276-
assert writer.persisted_size == expected_recv_count*_DEFAULT_FLUSH_INTERVAL_BYTES
277-
assert writer.write_obj_stream.send.await_count == -(-data_len // _MAX_CHUNK_SIZE_BYTES) # Ceiling division for number of chunks
278-
assert writer.write_obj_stream.recv.await_count == expected_recv_count # Expect 1 recv per flush interval
273+
assert (
274+
writer.bytes_appended_since_last_flush
275+
== data_len % _DEFAULT_FLUSH_INTERVAL_BYTES
276+
)
277+
assert (
278+
writer.persisted_size == expected_recv_count * _DEFAULT_FLUSH_INTERVAL_BYTES
279+
)
280+
assert writer.write_obj_stream.send.await_count == -(
281+
-data_len // _MAX_CHUNK_SIZE_BYTES
282+
) # Ceiling division for number of chunks
283+
assert (
284+
writer.write_obj_stream.recv.await_count == expected_recv_count
285+
) # Expect 1 recv per flush interval
279286

280287
@pytest.mark.asyncio
281288
async def test_append_recovery_reopens_stream(self, mock_appendable_writer):
@@ -339,9 +346,9 @@ async def test_flush_resets_counters(self, mock_appendable_writer):
339346
writer.write_obj_stream = mock_appendable_writer["mock_stream"]
340347
writer.bytes_appended_since_last_flush = 100
341348

342-
mock_appendable_writer["mock_stream"].recv.return_value = (
343-
storage_type.BidiWriteObjectResponse(persisted_size=200)
344-
)
349+
mock_appendable_writer[
350+
"mock_stream"
351+
].recv.return_value = storage_type.BidiWriteObjectResponse(persisted_size=200)
345352

346353
await writer.flush()
347354

@@ -382,9 +389,9 @@ async def test_finalize_lifecycle(self, mock_appendable_writer):
382389
writer.write_obj_stream = mock_appendable_writer["mock_stream"]
383390

384391
resource = storage_type.Object(size=999)
385-
mock_appendable_writer["mock_stream"].recv.return_value = (
386-
storage_type.BidiWriteObjectResponse(resource=resource)
387-
)
392+
mock_appendable_writer[
393+
"mock_stream"
394+
].recv.return_value = storage_type.BidiWriteObjectResponse(resource=resource)
388395

389396
res = await writer.finalize()
390397

0 commit comments

Comments (0)