Skip to content

Commit b43f229

Browse files
committed
chore: lint
1 parent b4995ab commit b43f229

2 files changed

Lines changed: 15 additions & 37 deletions

File tree

src/zarr/codecs/sharding.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -536,7 +536,6 @@ def _encode_sync(
536536
morton_order_iter(chunks_per_shard)
537537
)
538538

539-
chunk_spec = self._get_chunk_spec(shard_spec)
540539
skip_empty = not shard_spec.config.write_empty_chunks
541540
fill_value = shard_spec.fill_value
542541
if fill_value is None:

src/zarr/core/codec_pipeline.py

Lines changed: 15 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,6 @@
88
from typing import TYPE_CHECKING, Any
99
from warnings import warn
1010

11-
import numpy as np
12-
1311
from zarr.abc.codec import (
1412
ArrayArrayCodec,
1513
ArrayBytesCodec,
@@ -21,8 +19,6 @@
2119
GetResult,
2220
SupportsSyncCodec,
2321
)
24-
from zarr.core.array_spec import ArraySpec
25-
from zarr.core.buffer import numpy_buffer_prototype
2622
from zarr.core.common import concurrent_map
2723
from zarr.core.config import config
2824
from zarr.core.indexing import SelectorTuple, is_scalar
@@ -33,7 +29,8 @@
3329
from collections.abc import Iterable, Iterator
3430
from typing import Self
3531

36-
from zarr.abc.store import ByteGetter, ByteSetter, RangeByteRequest
32+
from zarr.abc.store import ByteGetter, ByteSetter
33+
from zarr.core.array_spec import ArraySpec
3734
from zarr.core.buffer import Buffer, BufferPrototype, NDBuffer
3835
from zarr.core.dtype.wrapper import TBaseDType, TBaseScalar, ZDType
3936
from zarr.core.metadata.v3 import ChunkGridMetadata
@@ -799,9 +796,7 @@ def from_codecs(cls, codecs: Iterable[Codec], *, batch_size: int | None = None)
799796
)
800797

801798
def evolve_from_array_spec(self, array_spec: ArraySpec) -> Self:
802-
evolved_codecs = tuple(
803-
c.evolve_from_array_spec(array_spec=array_spec) for c in self.codecs
804-
)
799+
evolved_codecs = tuple(c.evolve_from_array_spec(array_spec=array_spec) for c in self.codecs)
805800
aa, ab, bb = codecs_from_list(evolved_codecs)
806801

807802
try:
@@ -924,8 +919,7 @@ def _merge_chunk_array(
924919
chunk_value = value[out_selection]
925920
if drop_axes:
926921
item = tuple(
927-
None if idx in drop_axes else slice(None)
928-
for idx in range(chunk_spec.ndim)
922+
None if idx in drop_axes else slice(None) for idx in range(chunk_spec.ndim)
929923
)
930924
chunk_value = chunk_value[item]
931925
chunk_array[chunk_selection] = chunk_value
@@ -967,9 +961,7 @@ def _decode_one(raw: Buffer | None, chunk_spec: ArraySpec) -> NDBuffer | None:
967961
if raw is None:
968962
return None
969963
chunk_shape = (
970-
chunk_spec.shape
971-
if chunk_spec.shape != transform.array_spec.shape
972-
else None
964+
chunk_spec.shape if chunk_spec.shape != transform.array_spec.shape else None
973965
)
974966
return transform.decode_chunk(raw, chunk_shape=chunk_shape)
975967

@@ -978,7 +970,9 @@ def _decode_one(raw: Buffer | None, chunk_spec: ArraySpec) -> NDBuffer | None:
978970
with ThreadPoolExecutor(max_workers=n_workers) as pool:
979971
decoded_list = list(pool.map(_decode_one, raw_buffers, specs))
980972
else:
981-
decoded_list = [_decode_one(raw, spec) for raw, spec in zip(raw_buffers, specs, strict=True)]
973+
decoded_list = [
974+
_decode_one(raw, spec) for raw, spec in zip(raw_buffers, specs, strict=True)
975+
]
982976

983977
# Phase 3: scatter (sequential, writes to shared output buffer)
984978
results: list[GetResult] = []
@@ -1030,9 +1024,7 @@ def _process_one(
10301024
_, chunk_spec, chunk_selection, out_selection, is_complete = batch[idx]
10311025
existing_bytes = existing_buffers[idx]
10321026
chunk_shape = (
1033-
chunk_spec.shape
1034-
if chunk_spec.shape != transform.array_spec.shape
1035-
else None
1027+
chunk_spec.shape if chunk_spec.shape != transform.array_spec.shape else None
10361028
)
10371029

10381030
existing_chunk_array: NDBuffer | None = None
@@ -1099,19 +1091,14 @@ async def read(
10991091

11001092
# Async fallback: fetch all chunks, decode via async codec API, scatter
11011093
chunk_bytes_batch = await concurrent_map(
1102-
[
1103-
(byte_getter, array_spec.prototype)
1104-
for byte_getter, array_spec, *_ in batch
1105-
],
1094+
[(byte_getter, array_spec.prototype) for byte_getter, array_spec, *_ in batch],
11061095
lambda byte_getter, prototype: byte_getter.get(prototype),
11071096
config.get("async.concurrency"),
11081097
)
11091098
chunk_array_batch = await self.decode(
11101099
[
11111100
(chunk_bytes, chunk_spec)
1112-
for chunk_bytes, (_, chunk_spec, *_) in zip(
1113-
chunk_bytes_batch, batch, strict=False
1114-
)
1101+
for chunk_bytes, (_, chunk_spec, *_) in zip(chunk_bytes_batch, batch, strict=False)
11151102
],
11161103
)
11171104
results: list[GetResult] = []
@@ -1175,9 +1162,7 @@ async def _read_key(
11751162
chunk_array_decoded = await self.decode(
11761163
[
11771164
(chunk_bytes, chunk_spec)
1178-
for chunk_bytes, (_, chunk_spec, *_) in zip(
1179-
chunk_bytes_batch, batch, strict=False
1180-
)
1165+
for chunk_bytes, (_, chunk_spec, *_) in zip(chunk_bytes_batch, batch, strict=False)
11811166
],
11821167
)
11831168

@@ -1200,9 +1185,7 @@ async def _read_key(
12001185
) in zip(chunk_array_decoded, batch, strict=False)
12011186
]
12021187
chunk_array_batch: list[NDBuffer | None] = []
1203-
for chunk_array, (_, chunk_spec, *_) in zip(
1204-
chunk_array_merged, batch, strict=False
1205-
):
1188+
for chunk_array, (_, chunk_spec, *_) in zip(chunk_array_merged, batch, strict=False):
12061189
if chunk_array is None:
12071190
chunk_array_batch.append(None) # type: ignore[unreachable]
12081191
else:
@@ -1216,9 +1199,7 @@ async def _read_key(
12161199
chunk_bytes_batch = await self.encode(
12171200
[
12181201
(chunk_array, chunk_spec)
1219-
for chunk_array, (_, chunk_spec, *_) in zip(
1220-
chunk_array_batch, batch, strict=False
1221-
)
1202+
for chunk_array, (_, chunk_spec, *_) in zip(chunk_array_batch, batch, strict=False)
12221203
],
12231204
)
12241205

@@ -1231,9 +1212,7 @@ async def _write_key(byte_setter: ByteSetter, chunk_bytes: Buffer | None) -> Non
12311212
await concurrent_map(
12321213
[
12331214
(byte_setter, chunk_bytes)
1234-
for chunk_bytes, (byte_setter, *_) in zip(
1235-
chunk_bytes_batch, batch, strict=False
1236-
)
1215+
for chunk_bytes, (byte_setter, *_) in zip(chunk_bytes_batch, batch, strict=False)
12371216
],
12381217
_write_key,
12391218
config.get("async.concurrency"),

0 commit comments

Comments (0)