I just noticed a possible bug. If all the units in a json curation file (created by spikeinterface-gui) are either split or deleted, applying curation fails:
IndexError Traceback (most recent call last)
Cell In[16], line 1
----> 1 analyzer_curated = scur.apply_curation(sorting_analyzer,curation_dict_or_model=cur)
File ~/arbeit/PyCode/spikeinterface/spikeinterface-git/src/spikeinterface/curation/curation_format.py:287, in apply_curation(sorting_or_analyzer, curation_dict_or_model, censor_ms, new_id_strategy, merging_mode, sparsity_overlap, raise_error_if_overlap_fails, verbose, **job_kwargs)
279 curated_sorting_or_analyzer, _ = apply_splits_to_sorting(
280 curated_sorting_or_analyzer,
281 split_units,
(...)
284 return_extra=True,
285 )
286 else:
--> 287 curated_sorting_or_analyzer, _ = curated_sorting_or_analyzer.split_units(
288 split_units,
289 new_id_strategy=new_id_strategy,
290 return_new_unit_ids=True,
291 new_unit_ids=split_new_unit_ids,
292 format="memory",
293 verbose=verbose,
294 )
296 return curated_sorting_or_analyzer
File ~/arbeit/PyCode/spikeinterface/spikeinterface-git/src/spikeinterface/core/sortinganalyzer.py:1426, in SortingAnalyzer.split_units(self, split_units, new_unit_ids, new_id_strategy, return_new_unit_ids, format, folder, verbose, **job_kwargs)
1423 new_unit_ids = generate_unit_ids_for_split(self.unit_ids, split_units, new_unit_ids, new_id_strategy)
1424 all_unit_ids = _get_ids_after_splitting(self.unit_ids, split_units, new_unit_ids=new_unit_ids)
-> 1426 new_analyzer = self._save_or_select_or_merge_or_split(
1427 format=format,
1428 folder=folder,
1429 split_units=split_units,
1430 unit_ids=all_unit_ids,
1431 verbose=verbose,
1432 split_new_unit_ids=new_unit_ids,
1433 **job_kwargs,
1434 )
1435 if return_new_unit_ids:
1436 return new_analyzer, new_unit_ids
File ~/arbeit/PyCode/spikeinterface/spikeinterface-git/src/spikeinterface/core/sortinganalyzer.py:1154, in SortingAnalyzer._save_or_select_or_merge_or_split(self, format, folder, unit_ids, merge_unit_groups, censor_ms, merging_mode, sparsity_overlap, merge_new_unit_ids, split_units, splitting_mode, split_new_unit_ids, backend_options, verbose, **job_kwargs)
1151 else:
1152 # split
1153 if splitting_mode == "soft":
-> 1154 new_sorting_analyzer.extensions[extension_name] = extension.split(
1155 new_sorting_analyzer, split_units=split_units, new_unit_ids=split_new_unit_ids, verbose=verbose
1156 )
1157 elif splitting_mode == "hard":
1158 recompute_dict[extension_name] = extension.params
File ~/arbeit/PyCode/spikeinterface/spikeinterface-git/src/spikeinterface/core/sortinganalyzer.py:2628, in AnalyzerExtension.split(self, new_sorting_analyzer, split_units, new_unit_ids, verbose, **job_kwargs)
2626 new_extension = self.__class__(new_sorting_analyzer)
2627 new_extension.params = self.params.copy()
-> 2628 new_extension.data = self._split_extension_data(
2629 split_units, new_unit_ids, new_sorting_analyzer, verbose=verbose, **job_kwargs
2630 )
2631 new_extension.run_info = copy(self.run_info)
2632 new_extension.save()
File ~/arbeit/PyCode/spikeinterface/spikeinterface-git/src/spikeinterface/core/analyzer_extension_core.py:607, in ComputeTemplates._split_extension_data(self, split_units, new_unit_ids, new_sorting_analyzer, verbose, **job_kwargs)
605 new_indices = np.array([new_analyzer_unit_ids.index(unit_id) for unit_id in unsplit_unit_ids])
606 old_indices = self.sorting_analyzer.sorting.ids_to_indices(unsplit_unit_ids)
--> 607 new_array[new_indices, ...] = arr[old_indices, ...]
609 for split_unit_id, new_splits in zip(split_units, new_unit_ids):
610 if new_sorting_analyzer.has_extension("waveforms"):
IndexError: arrays used as indices must be of integer (or boolean) type
if unsplit_unit_ids:
new_indices = np.array([new_analyzer_unit_ids.index(unit_id) for unit_id in unsplit_unit_ids])
old_indices = self.sorting_analyzer.sorting.ids_to_indices(unsplit_unit_ids)
new_array[new_indices, ...] = arr[old_indices, ...]
However, that lazy fix results in a whole bunch of problems downstream. A possible workaround is to simply make sure that at least one unit is left un-deleted and un-split in a given curation session, and then split or delete it in a new curation session.
spikeinterface v. 0.104.1
spikeinterface-gui v. 0.13.1
Hi,
I just noticed a possible bug. If all the units in a json curation file (created by spikeinterface-gui) are either split or deleted, applying curation fails:
From what I gathered, in line 604 of `analyzer_extension_core.py`, the list `unsplit_unit_ids` is empty if no units are left that were not split, so the subsequent index arrays are empty float arrays and the fancy-indexing assignment fails. I tried a lazy fix (guarding the assignment with `if unsplit_unit_ids:`), but that results in a whole bunch of problems downstream. A possible workaround is to simply make sure that at least one unit is left un-deleted and un-split in a given curation session, and then split or delete it in a new curation session.
spikeinterface v. 0.104.1
spikeinterface-gui v. 0.13.1