@@ -259,11 +259,11 @@ def _chunk_ds(
259259 name ,
260260 var ,
261261 var_chunks ,
262+ chunkmanager ,
262263 overwrite_encoded_chunks = overwrite_encoded_chunks ,
263264 name_prefix = name_prefix ,
264265 token = token ,
265266 inline_array = inline_array ,
266- chunked_array_type = chunkmanager ,
267267 from_array_kwargs = from_array_kwargs .copy (),
268268 just_use_token = True ,
269269 )
@@ -292,9 +292,9 @@ def _dataset_from_backend_dataset(
292292 create_default_indexes ,
293293 ** extra_tokens ,
294294):
295- if not isinstance (chunks , int | dict ) and chunks not in {None , "auto" , "preserve" }:
 295+ if not isinstance (chunks , int | dict ) and chunks not in {None , "auto" , "dask-auto" }:
 296296 raise ValueError (
 297- f"chunks must be an int, dict, 'auto', 'preserve', or None. Instead found { chunks } ."
 297+ f"chunks must be an int, dict, 'auto', 'dask-auto', or None. Instead found { chunks } ."
298298 )
299299
300300 _protect_dataset_variables_inplace (backend_ds , cache )
@@ -430,14 +430,14 @@ def open_dataset(
430430 "netcdf4" over "h5netcdf" over "scipy" (customizable via
431431 ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend
432432 class (a subclass of ``BackendEntrypoint``) can also be used.
433- chunks : int, dict, 'auto', 'preserve ' or None, default: None
433+ chunks : int, dict, 'auto', 'dask-auto ' or None, default: None
434434 If provided, used to load the data into dask arrays.
435435
436- - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
437- engine preferred chunks.
438- - ``chunks="preserve"`` will use a chunking scheme that never splits encoded
439- chunks. If encoded chunks are small then "preserve" takes multiples of them
436+ - ``chunks="auto"`` will use a chunking scheme that never splits encoded
437+ chunks. If encoded chunks are small then "auto" takes multiples of them
440438 over the largest dimension.
439+ - ``chunks="dask-auto"`` will use dask ``auto`` chunking taking into account the
440+ engine preferred chunks.
441441 - ``chunks=None`` skips using dask. This uses xarray's internally private
442442 :ref:`lazy indexing classes <internal design.lazy indexing>`,
443443 but data is eagerly loaded into memory as numpy arrays when accessed.
@@ -677,14 +677,14 @@ def open_dataarray(
677677 "netcdf4" over "h5netcdf" over "scipy" (customizable via
678678 ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend
679679 class (a subclass of ``BackendEntrypoint``) can also be used.
680- chunks : int, dict, 'auto', 'preserve ', or None, default: None
680+ chunks : int, dict, 'auto', 'dask-auto ', or None, default: None
681681 If provided, used to load the data into dask arrays.
682682
683- - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
684- engine preferred chunks.
685- - ``chunks="preserve"`` will use a chunking scheme that never splits encoded
686- chunks. If encoded chunks are small then "preserve" takes multiples of them
683+ - ``chunks="auto"`` will use a chunking scheme that never splits encoded
684+ chunks. If encoded chunks are small then "auto" takes multiples of them
687685 over the largest dimension.
686+ - ``chunks='dask-auto'`` will use dask ``auto`` chunking taking into account the
687+ engine preferred chunks.
688688 - ``chunks=None`` skips using dask. This uses xarray's internally private
689689 :ref:`lazy indexing classes <internal design.lazy indexing>`,
690690 but data is eagerly loaded into memory as numpy arrays when accessed.
@@ -906,13 +906,13 @@ def open_datatree(
906906 "h5netcdf" over "netcdf4" (customizable via ``netcdf_engine_order`` in
907907 ``xarray.set_options()``). A custom backend class (a subclass of
908908 ``BackendEntrypoint``) can also be used.
909- chunks : int, dict, 'auto', preserve , or None, default: None
909+ chunks : int, dict, 'auto', 'dask-auto' , or None, default: None
910910 If provided, used to load the data into dask arrays.
911911
912- - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
912+ - ``chunks="dask- auto"`` will use dask ``auto`` chunking taking into account the
913913 engine preferred chunks.
914- - ``chunks="preserve "`` will use a chunking scheme that never splits encoded
915- chunks. If encoded chunks are small then "preserve " takes multiples of them
914+ - ``chunks="auto "`` will use a chunking scheme that never splits encoded
915+ chunks. If encoded chunks are small then "auto " takes multiples of them
916916 over the largest dimension.
917917 - ``chunks=None`` skips using dask. This uses xarray's internally private
918918 :ref:`lazy indexing classes <internal design.lazy indexing>`,
@@ -1155,14 +1155,14 @@ def open_groups(
11551155 ``xarray.set_options()``). A custom backend class (a subclass of
11561156 ``BackendEntrypoint``) can also be used.
11571157 can also be used.
1158- chunks : int, dict, 'auto', 'preserve ', or None, default: None
1158+ chunks : int, dict, 'auto', 'dask-auto ', or None, default: None
11591159 If provided, used to load the data into dask arrays.
11601160
1161- - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
1162- engine preferred chunks.
1163- - ``chunks="preserve"`` will use a chunking scheme that never splits encoded
1164- chunks. If encoded chunks are small then "preserve" takes multiples of them
1161+ - ``chunks="auto"`` will use a chunking scheme that never splits encoded
1162+ chunks. If encoded chunks are small then "auto" takes multiples of them
11651163 over the largest dimension.
1164+ - ``chunks="dask-auto"`` will use dask ``auto`` chunking taking into account the
1165+ engine preferred chunks.
11661166 - ``chunks=None`` skips using dask. This uses xarray's internally private
11671167 :ref:`lazy indexing classes <internal design.lazy indexing>`,
11681168 but data is eagerly loaded into memory as numpy arrays when accessed.
@@ -1430,7 +1430,7 @@ def open_mfdataset(
14301430 concatenation along more than one dimension is desired, then ``paths`` must be a
14311431 nested list-of-lists (see ``combine_nested`` for details). (A string glob will
14321432 be expanded to a 1-dimensional list.)
1433- chunks : int, dict, 'auto', 'preserve ', or None, optional
1433+ chunks : int, dict, 'auto', 'dask-auto ', or None, optional
14341434 Dictionary with keys given by dimension names and values given by chunk sizes.
14351435 In general, these should divide the dimensions of each dataset. If int, chunk
14361436 each dimension by ``chunks``. By default, chunks will be chosen to match the
0 commit comments