Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 6 additions & 6 deletions src/spikeinterface/core/analyzer_extension_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def _run(self, verbose=False, **job_kwargs):
self.nbefore,
self.nafter,
mode=mode,
return_scaled=self.sorting_analyzer.return_scaled,
return_in_uV=self.sorting_analyzer.return_in_uV,
file_path=file_path,
dtype=self.params["dtype"],
sparsity_mask=sparsity_mask,
Expand All @@ -216,7 +216,7 @@ def _set_params(
if dtype is None:
dtype = recording.get_dtype()

if np.issubdtype(dtype, np.integer) and self.sorting_analyzer.return_scaled:
if np.issubdtype(dtype, np.integer) and self.sorting_analyzer.return_in_uV:
dtype = "float32"

dtype = np.dtype(dtype)
Expand Down Expand Up @@ -427,7 +427,7 @@ def _run(self, verbose=False, **job_kwargs):
# retrieve spike vector and the sampling
some_spikes = self.sorting_analyzer.get_extension("random_spikes").get_random_spikes()

return_scaled = self.sorting_analyzer.return_scaled
return_in_uV = self.sorting_analyzer.return_in_uV

return_std = "std" in self.params["operators"]
output = estimate_templates_with_accumulator(
Expand All @@ -436,7 +436,7 @@ def _run(self, verbose=False, **job_kwargs):
unit_ids,
self.nbefore,
self.nafter,
return_scaled=return_scaled,
return_in_uV=return_in_uV,
return_std=return_std,
verbose=verbose,
**job_kwargs,
Expand Down Expand Up @@ -648,7 +648,7 @@ def get_templates(self, unit_ids=None, operator="average", percentile=None, save
channel_ids=self.sorting_analyzer.channel_ids,
unit_ids=unit_ids,
probe=self.sorting_analyzer.get_probe(),
is_scaled=self.sorting_analyzer.return_scaled,
is_scaled=self.sorting_analyzer.return_in_uV,
)
else:
raise ValueError("`outputs` must be 'numpy' or 'Templates'")
Expand Down Expand Up @@ -732,7 +732,7 @@ def _merge_extension_data(
def _run(self, verbose=False, **job_kwargs):
self.data["noise_levels"] = get_noise_levels(
self.sorting_analyzer.recording,
return_scaled=self.sorting_analyzer.return_scaled,
return_in_uV=self.sorting_analyzer.return_in_uV,
**self.params,
**job_kwargs,
)
Expand Down
24 changes: 19 additions & 5 deletions src/spikeinterface/core/baserecording.py
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,8 @@ def get_traces(
end_frame: int | None = None,
channel_ids: list | np.array | tuple | None = None,
order: "C" | "F" | None = None,
return_scaled: bool = False,
return_scaled: bool | None = None,
return_in_uV: bool = False,
cast_unsigned: bool = False,
) -> np.ndarray:
"""Returns traces from recording.
Expand All @@ -304,7 +305,11 @@ def get_traces(
The channel ids. If None, all channels are used, default: None
order : "C" | "F" | None, default: None
The order of the traces ("C" | "F"). If None, traces are returned as they are
return_scaled : bool, default: False
return_scaled : bool | None, default: None
DEPRECATED. Use return_in_uV instead.
If True and the recording has scaling (gain_to_uV and offset_to_uV properties),
traces are scaled to uV
return_in_uV : bool, default: False
If True and the recording has scaling (gain_to_uV and offset_to_uV properties),
traces are scaled to uV
cast_unsigned : bool, default: False
Expand All @@ -319,7 +324,7 @@ def get_traces(
Raises
------
ValueError
If return_scaled is True, but recording does not have scaled traces
If return_in_uV is True, but recording does not have scaled traces
"""
segment_index = self._check_segment_index(segment_index)
channel_indices = self.ids_to_indices(channel_ids, prefer_slice=True)
Expand All @@ -343,15 +348,24 @@ def get_traces(
traces = traces.astype(f"int{2 * (dtype.itemsize) * 8}") - 2 ** (nbits - 1)
traces = traces.astype(f"int{dtype.itemsize * 8}")

if return_scaled:
# Handle deprecated return_scaled parameter
if return_scaled is not None:
warnings.warn(
"`return_scaled` is deprecated and will be removed in a future version. Use `return_in_uV` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return_in_uV = return_scaled

if return_in_uV:
if not self.has_scaleable_traces():
if self._dtype.kind == "f":
# here we do not truly have scaling, but we assume the data is already scaled
# this helps a lot for simulated data
pass
else:
raise ValueError(
"This recording does not support return_scaled=True (need gain_to_uV and offset_"
"This recording does not support return_in_uV=True (need gain_to_uV and offset_"
"to_uV properties)"
)
else:
Expand Down
80 changes: 75 additions & 5 deletions src/spikeinterface/core/basesnippets.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,17 +97,51 @@ def get_snippets(
indices=None,
segment_index: Union[int, None] = None,
channel_ids: Union[list, None] = None,
return_scaled=False,
return_scaled: bool | None = None,
return_in_uV: bool = False,
):
"""
Return the snippets, optionally for a subset of samples and/or channels

Parameters
----------
indices : list[int], default: None
Indices of the snippets to return. If None, all snippets are returned.
segment_index : Union[int, None], default: None
The segment index to get snippets from. If snippets is multi-segment, it is required.
channel_ids : Union[list, None], default: None
The channel ids. If None, all channels are used.
return_scaled : bool | None, default: None
DEPRECATED. Use return_in_uV instead.
If True and the snippets has scaling (gain_to_uV and offset_to_uV properties),
snippets are scaled to uV
return_in_uV : bool, default: False
If True and the snippets has scaling (gain_to_uV and offset_to_uV properties),
snippets are scaled to uV

Returns
-------
np.array
The snippets (num_snippets, num_samples, num_channels)
"""
segment_index = self._check_segment_index(segment_index)
spts = self._snippets_segments[segment_index]
channel_indices = self.ids_to_indices(channel_ids, prefer_slice=True)
wfs = spts.get_snippets(indices, channel_indices=channel_indices)

if return_scaled:
# Handle deprecated return_scaled parameter
if return_scaled is not None:
warn(
"`return_scaled` is deprecated and will be removed in a future version. Use `return_in_uV` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return_in_uV = return_scaled

if return_in_uV:
if not self.has_scaleable_traces():
raise ValueError(
"These snippets do not support return_scaled=True (need gain_to_uV and offset_" "to_uV properties)"
"These snippets do not support return_in_uV=True (need gain_to_uV and offset_" "to_uV properties)"
)
else:
gains = self.get_property("gain_to_uV")
Expand All @@ -123,13 +157,49 @@ def get_snippets_from_frames(
start_frame: Union[int, None] = None,
end_frame: Union[int, None] = None,
channel_ids: Union[list, None] = None,
return_scaled=False,
return_scaled: bool | None = None,
return_in_uV: bool = False,
):
"""
Return the snippets from frames, optionally for a subset of samples and/or channels

Parameters
----------
segment_index : Union[int, None], default: None
The segment index to get snippets from. If snippets is multi-segment, it is required.
start_frame : Union[int, None], default: None
The start frame. If None, 0 is used.
end_frame : Union[int, None], default: None
The end frame. If None, the number of samples in the segment is used.
channel_ids : Union[list, None], default: None
The channel ids. If None, all channels are used.
return_scaled : bool | None, default: None
DEPRECATED. Use return_in_uV instead.
If True and the snippets has scaling (gain_to_uV and offset_to_uV properties),
snippets are scaled to uV
return_in_uV : bool, default: False
If True and the snippets has scaling (gain_to_uV and offset_to_uV properties),
snippets are scaled to uV

Returns
-------
np.array
The snippets (num_snippets, num_samples, num_channels)
"""
segment_index = self._check_segment_index(segment_index)
spts = self._snippets_segments[segment_index]
indices = spts.frames_to_indices(start_frame, end_frame)

return self.get_snippets(indices, channel_ids=channel_ids, return_scaled=return_scaled)
# Handle deprecated return_scaled parameter
if return_scaled is not None:
warn(
"`return_scaled` is deprecated and will be removed in a future version. Use `return_in_uV` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return_in_uV = return_scaled

return self.get_snippets(indices, channel_ids=channel_ids, return_in_uV=return_in_uV)

def _save(self, format="binary", **save_kwargs):
raise NotImplementedError
Expand Down
51 changes: 39 additions & 12 deletions src/spikeinterface/core/recording_tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -380,7 +380,8 @@ def write_to_h5_dataset_format(
chunk_memory="500M",
verbose=False,
auto_cast_uint=True,
return_scaled=False,
return_scaled=None,
return_in_uV=False,
):
"""
Save the traces of a recording extractor in an h5 dataset.
Expand Down Expand Up @@ -414,7 +415,11 @@ def write_to_h5_dataset_format(
If True, output is verbose (when chunks are used)
auto_cast_uint : bool, default: True
If True, unsigned integers are automatically cast to int if the specified dtype is signed
return_scaled : bool, default: False
return_scaled : bool | None, default: None
DEPRECATED. Use return_in_uV instead.
If True and the recording has scaling (gain_to_uV and offset_to_uV properties),
traces are dumped to uV
return_in_uV : bool, default: False
If True and the recording has scaling (gain_to_uV and offset_to_uV properties),
traces are dumped to uV
"""
Expand Down Expand Up @@ -459,7 +464,15 @@ def write_to_h5_dataset_format(
chunk_size = ensure_chunk_size(recording, chunk_size=chunk_size, chunk_memory=chunk_memory, n_jobs=1)

if chunk_size is None:
traces = recording.get_traces(cast_unsigned=cast_unsigned, return_scaled=return_scaled)
# Handle deprecated return_scaled parameter
if return_scaled is not None:
warnings.warn(
"`return_scaled` is deprecated and will be removed in a future version. Use `return_in_uV` instead.",
category=DeprecationWarning,
)
return_in_uV = return_scaled

traces = recording.get_traces(cast_unsigned=cast_unsigned, return_in_uV=return_in_uV)
if dtype is not None:
traces = traces.astype(dtype_file, copy=False)
if time_axis == 1:
Expand All @@ -484,7 +497,7 @@ def write_to_h5_dataset_format(
start_frame=i * chunk_size,
end_frame=min((i + 1) * chunk_size, num_frames),
cast_unsigned=cast_unsigned,
return_scaled=return_scaled,
return_in_uV=return_in_uV if return_scaled is None else return_scaled,
)
chunk_frames = traces.shape[0]
if dtype is not None:
Expand Down Expand Up @@ -599,7 +612,9 @@ def get_random_recording_slices(
return recording_slices


def get_random_data_chunks(recording, return_scaled=False, concatenated=True, **random_slices_kwargs):
def get_random_data_chunks(
recording, return_scaled=None, return_in_uV=False, concatenated=True, **random_slices_kwargs
):
"""
Extract random chunks across segments.

Expand Down Expand Up @@ -636,7 +651,7 @@ def get_random_data_chunks(recording, return_scaled=False, concatenated=True, **
start_frame=start_frame,
end_frame=end_frame,
segment_index=segment_index,
return_scaled=return_scaled,
return_in_uV=return_in_uV if return_scaled is None else return_scaled,
)
chunk_list.append(traces_chunk)

Expand Down Expand Up @@ -713,17 +728,18 @@ def _noise_level_chunk(segment_index, start_frame, end_frame, worker_ctx):
return noise_levels


def _noise_level_chunk_init(recording, return_scaled, method):
def _noise_level_chunk_init(recording, return_in_uV, method):
worker_ctx = {}
worker_ctx["recording"] = recording
worker_ctx["return_scaled"] = return_scaled
worker_ctx["return_scaled"] = return_in_uV
worker_ctx["method"] = method
return worker_ctx


def get_noise_levels(
recording: "BaseRecording",
return_scaled: bool = True,
return_scaled: bool | None = None,
return_in_uV: bool = True,
method: Literal["mad", "std"] = "mad",
force_recompute: bool = False,
random_slices_kwargs: dict = {},
Expand All @@ -745,7 +761,10 @@ def get_noise_levels(

recording : BaseRecording
The recording extractor to get noise levels
return_scaled : bool
return_scaled : bool | None, default: None
DEPRECATED. Use return_in_uV instead.
If True, returned noise levels are scaled to uV
return_in_uV : bool, default: True
If True, returned noise levels are scaled to uV
method : "mad" | "std", default: "mad"
The method to use to estimate noise levels
Expand All @@ -763,7 +782,15 @@ def get_noise_levels(
Noise levels for each channel
"""

if return_scaled:
# Handle deprecated return_scaled parameter
if return_scaled is not None:
warnings.warn(
"`return_scaled` is deprecated and will be removed in a future version. Use `return_in_uV` instead.",
category=DeprecationWarning,
)
return_in_uV = return_scaled

if return_in_uV:
key = f"noise_level_{method}_scaled"
else:
key = f"noise_level_{method}_raw"
Expand Down Expand Up @@ -797,7 +824,7 @@ def append_noise_chunk(res):

func = _noise_level_chunk
init_func = _noise_level_chunk_init
init_args = (recording, return_scaled, method)
init_args = (recording, return_in_uV, method)
executor = ChunkRecordingExecutor(
recording,
func,
Expand Down
Loading