-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathcontroller.py
More file actions
1032 lines (864 loc) · 42.2 KB
/
controller.py
File metadata and controls
1032 lines (864 loc) · 42.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import time
import numpy as np
import json
from spikeinterface.widgets.utils import get_unit_colors
from spikeinterface import compute_sparsity
from spikeinterface.core import get_template_extremum_channel
from spikeinterface.core.sorting_tools import spike_vector_to_indices
from spikeinterface.core.core_tools import check_json
from spikeinterface.curation import validate_curation_dict
from spikeinterface.curation.curation_model import CurationModel
from spikeinterface.widgets.utils import make_units_table_from_analyzer
from .curation_tools import add_merge, default_label_definitions, empty_curation_data
# dtype of the controller's internal spike vector: the sorting fields plus
# GUI state flags ('visible', 'selected', 'rand_selected') used for display.
spike_dtype = [('sample_index', 'int64'), ('unit_index', 'int64'),
    ('channel_index', 'int64'), ('segment_index', 'int64'),
    ('visible', 'bool'), ('selected', 'bool'), ('rand_selected', 'bool')]

# default GUI-wide settings; copied per Controller instance in __init__
_default_main_settings = dict(
    max_visible_units=10,
    color_mode='color_by_unit',
    use_times=False
)
from spikeinterface.widgets.sorting_summary import _default_displayed_unit_properties
class Controller():
    """Central data model of the GUI.

    Wraps a SortingAnalyzer: loads/caches extensions, tracks unit/channel
    visibility, spike selection, time navigation, colors and (optionally)
    the manual-curation state shared by all views.
    """
    def __init__(self, analyzer=None, backend="qt", parent=None, verbose=False, save_on_compute=False,
                 curation=False, curation_data=None, label_definitions=None, with_traces=True,
                 displayed_unit_properties=None,
                 extra_unit_properties=None, skip_extensions=None, disable_save_settings_button=False):
        """Build the controller state from the analyzer.

        Mandatory extensions (random_spikes, templates, unit_locations) are
        computed when missing; optional ones are loaded when present unless
        listed in ``skip_extensions``. When ``curation`` is True the curation
        dict is taken from ``curation_data``, else loaded from the analyzer
        folder (binary_folder/zarr), else created empty.
        """
        self.views = []
        skip_extensions = skip_extensions if skip_extensions is not None else []
        self.skip_extensions = skip_extensions
        self.backend = backend
        self.disable_save_settings_button = disable_save_settings_button
        self.current_curation_saved = True
        # the signal handler is backend specific (qt signals vs panel callbacks)
        if self.backend == "qt":
            from .backend_qt import SignalHandler
            self.signal_handler = SignalHandler(self, parent=parent)
        elif self.backend == "panel":
            from .backend_panel import SignalHandler
            self.signal_handler = SignalHandler(self, parent=parent)
        self.with_traces = with_traces
        self.analyzer = analyzer
        # random_spikes is mandatory (used below for the 'rand_selected' flags)
        assert self.analyzer.get_extension("random_spikes") is not None
        self.return_in_uV = self.analyzer.return_in_uV
        self.save_on_compute = save_on_compute
        self.verbose = verbose
        t0 = time.perf_counter()
        self.main_settings = _default_main_settings.copy()
        self.num_channels = self.analyzer.get_num_channels()
        # this is now private and should be accessed through get/set_visible_unit_ids()
        self._visible_unit_ids = [self.unit_ids[0]]
        # sparsity: prefer the analyzer's own sparsity; otherwise compute an
        # external radius-based one for display purposes
        if self.analyzer.sparsity is None:
            self.external_sparsity = compute_sparsity(self.analyzer, method="radius", radius_um=90.)
            self.analyzer_sparsity = None
        else:
            self.external_sparsity = None
            self.analyzer_sparsity = self.analyzer.sparsity
        # Mandatory extensions: computation forced
        if verbose:
            print('\tLoading templates')
        temp_ext = self.analyzer.get_extension("templates")
        if temp_ext is None:
            temp_ext = self.analyzer.compute_one_extension("templates")
        self.nbefore, self.nafter = temp_ext.nbefore, temp_ext.nafter
        self.templates_average = temp_ext.get_templates(operator='average')
        if 'std' in temp_ext.params['operators']:
            self.templates_std = temp_ext.get_templates(operator='std')
        else:
            self.templates_std = None
        if verbose:
            print('\tLoading unit_locations')
        ext = analyzer.get_extension('unit_locations')
        if ext is None:
            print('Force compute "unit_locations" is needed')
            ext = analyzer.compute_one_extension('unit_locations')
        # only 2D
        self.unit_positions = ext.get_data()[:, :2]
        # Optional extensions : can be None or skipped
        if verbose:
            print('\tLoading noise_levels')
        ext = analyzer.get_extension('noise_levels')
        if ext is None and self.has_extension('recording'):
            print('Force compute "noise_levels" is needed')
            ext = analyzer.compute_one_extension('noise_levels')
        self.noise_levels = ext.get_data() if ext is not None else None
        if "quality_metrics" in skip_extensions:
            if self.verbose:
                print('\tSkipping quality_metrics')
            self.metrics = None
        else:
            if verbose:
                print('\tLoading quality_metrics')
            qm_ext = analyzer.get_extension('quality_metrics')
            if qm_ext is not None:
                self.metrics = qm_ext.get_data()
            else:
                self.metrics = None
        if "spike_amplitudes" in skip_extensions:
            if self.verbose:
                print('\tSkipping spike_amplitudes')
            self.spike_amplitudes = None
        else:
            if verbose:
                print('\tLoading spike_amplitudes')
            sa_ext = analyzer.get_extension('spike_amplitudes')
            if sa_ext is not None:
                self.spike_amplitudes = sa_ext.get_data()
            else:
                self.spike_amplitudes = None
        if "spike_locations" in skip_extensions:
            if self.verbose:
                print('\tSkipping spike_locations')
            self.spike_depths = None
        else:
            if verbose:
                print('\tLoading spike_locations')
            sl_ext = analyzer.get_extension('spike_locations')
            if sl_ext is not None:
                # only the depth (y) component is kept
                self.spike_depths = sl_ext.get_data()["y"]
            else:
                self.spike_depths = None
        if "correlograms" in skip_extensions:
            if self.verbose:
                print('\tSkipping correlograms')
            self.correlograms = None
            self.correlograms_bins = None
        else:
            if verbose:
                print('\tLoading correlograms')
            ccg_ext = analyzer.get_extension('correlograms')
            if ccg_ext is not None:
                self.correlograms, self.correlograms_bins = ccg_ext.get_data()
            else:
                self.correlograms, self.correlograms_bins = None, None
        if "isi_histograms" in skip_extensions:
            if self.verbose:
                print('\tSkipping isi_histograms')
            self.isi_histograms = None
            self.isi_bins = None
        else:
            if verbose:
                print('\tLoading isi_histograms')
            isi_ext = analyzer.get_extension('isi_histograms')
            if isi_ext is not None:
                self.isi_histograms, self.isi_bins = isi_ext.get_data()
            else:
                self.isi_histograms, self.isi_bins = None, None
        # cache of similarity matrices keyed by method name
        self._similarity_by_method = {}
        if "template_similarity" in skip_extensions:
            if self.verbose:
                print('\tSkipping template_similarity')
        else:
            if verbose:
                print('\tLoading template_similarity')
            ts_ext = analyzer.get_extension('template_similarity')
            if ts_ext is not None:
                method = ts_ext.params["method"]
                self._similarity_by_method[method] = ts_ext.get_data()
            else:
                if len(self.unit_ids) <= 64 and len(self.channel_ids) <= 64:
                    # precompute similarity when low channel/units count
                    method = 'l1'
                    ts_ext = analyzer.compute_one_extension('template_similarity', method=method, save=save_on_compute)
                    self._similarity_by_method[method] = ts_ext.get_data()
        if "waveforms" in skip_extensions:
            if self.verbose:
                print('\tSkipping waveforms')
            self.waveforms_ext = None
        else:
            if verbose:
                print('\tLoading waveforms')
            wf_ext = analyzer.get_extension('waveforms')
            if wf_ext is not None:
                self.waveforms_ext = wf_ext
            else:
                self.waveforms_ext = None
        # PC projections are loaded lazily by get_all_pcs()
        self._pc_projections = None
        if "principal_components" in skip_extensions:
            if self.verbose:
                print('\tSkipping principal_components')
            self.pc_ext = None
        else:
            if verbose:
                print('\tLoading principal_components')
            pc_ext = analyzer.get_extension('principal_components')
            self.pc_ext = pc_ext
        self._potential_merges = None
        t1 = time.perf_counter()
        if verbose:
            print('Loading extensions took', t1 - t0)
        t0 = time.perf_counter()
        self._extremum_channel = get_template_extremum_channel(self.analyzer, peak_sign='neg', outputs='index')
        # some direct attribute
        self.num_segments = self.analyzer.get_num_segments()
        self.sampling_frequency = self.analyzer.sampling_frequency
        # spikeinterface handle colors in matplotlib style tuple values in range (0,1)
        self.refresh_colors()
        # at init, we set the visible channels as the sparsity of the first unit
        if self.analyzer_sparsity is not None:
            self.visible_channel_inds = self.analyzer_sparsity.unit_id_to_channel_indices[self.unit_ids[0]].astype("int64")
        else:
            # if no sparsity, then all channels are visible
            assert self.external_sparsity is not None, "No sparsity found"
            self.visible_channel_inds = np.flatnonzero(self.external_sparsity.mask[0])
        t0 = time.perf_counter()
        # make internal spike vector
        # NOTE(review): this local `unit_ids` is unused below (self.unit_ids is used instead)
        unit_ids = self.analyzer.unit_ids
        num_seg = self.analyzer.get_num_segments()
        self.num_spikes = self.analyzer.sorting.count_num_spikes_per_unit(outputs="dict")
        # print("self.num_spikes", self.num_spikes)
        spike_vector = self.analyzer.sorting.to_spike_vector(concatenated=True, extremum_channel_inds=self._extremum_channel)
        # spike_vector = self.analyzer.sorting.to_spike_vector(concatenated=True)
        self.random_spikes_indices = self.analyzer.get_extension("random_spikes").get_data()
        self.spikes = np.zeros(spike_vector.size, dtype=spike_dtype)
        self.spikes['sample_index'] = spike_vector['sample_index']
        self.spikes['unit_index'] = spike_vector['unit_index']
        self.spikes['segment_index'] = spike_vector['segment_index']
        self.spikes['channel_index'] = spike_vector['channel_index']
        self.spikes['rand_selected'][:] = False
        self.spikes['rand_selected'][self.random_spikes_indices] = True
        # self.num_spikes = self.analyzer.sorting.count_num_spikes_per_unit(outputs="dict")
        # per-segment slices into the concatenated spike vector (spikes are sorted by segment)
        seg_limits = np.searchsorted(self.spikes["segment_index"], np.arange(num_seg + 1))
        self.segment_slices = {segment_index: slice(seg_limits[segment_index], seg_limits[segment_index + 1]) for segment_index in range(num_seg)}
        spike_vector2 = []
        for segment_index in range(num_seg):
            seg_slice = self.segment_slices[segment_index]
            spike_vector2.append(self.spikes[seg_slice])
        self.final_spike_samples = [segment_spike_vector[-1][0] for segment_spike_vector in spike_vector2]
        # this is dict of list because per segment spike_indices[segment_index][unit_id]
        spike_indices_abs = spike_vector_to_indices(spike_vector2, self.unit_ids, absolute_index=True)
        spike_indices = spike_vector_to_indices(spike_vector2, self.unit_ids)
        # this is flatten
        spike_per_seg = [s.size for s in spike_vector2]
        # dict[unit_id] -> all indices for this unit across segments
        self._spike_index_by_units = {}
        # dict[segment_index][unit_id] -> all indices for this unit for one segment
        self._spike_index_by_segment_and_units = spike_indices_abs
        for unit_id in self.unit_ids:
            inds = []
            for seg_ind in range(num_seg):
                # offset per-segment indices by the sizes of the previous segments
                inds.append(spike_indices[seg_ind][unit_id] + int(np.sum(spike_per_seg[:seg_ind])))
            self._spike_index_by_units[unit_id] = np.concatenate(inds)
        t1 = time.perf_counter()
        if verbose:
            print('Gathering all spikes took', t1 - t0)
        self._spike_visible_indices = np.array([], dtype='int64')
        self._spike_selected_indices = np.array([], dtype='int64')
        self.update_visible_spikes()
        # small LRU-ish cache of traces chunks, see get_traces()
        self._traces_cached = {}
        self.units_table = make_units_table_from_analyzer(analyzer, extra_properties=extra_unit_properties)
        if displayed_unit_properties is None:
            displayed_unit_properties = list(_default_displayed_unit_properties)
        if extra_unit_properties is not None:
            displayed_unit_properties += list(extra_unit_properties.keys())
        # keep only properties that actually exist in the units table
        displayed_unit_properties = [v for v in displayed_unit_properties if v in self.units_table.columns]
        self.displayed_unit_properties = displayed_unit_properties
        # set default time info
        self.update_time_info()
        self.curation = curation
        # TODO: Reload the dictionary if it already exists
        if self.curation:
            # rules:
            #  * if user sends curation_data, then it is used
            #  * otherwise, if curation_data already exists in folder it is used
            #  * otherwise create an empty one
            if curation_data is not None:
                # validate the curation data
                format_version = curation_data.get("format_version", None)
                # assume version 2 if not present
                if format_version is None:
                    raise ValueError("Curation data format version is missing and is required in the curation data.")
                try:
                    validate_curation_dict(curation_data)
                except Exception as e:
                    raise ValueError(f"Invalid curation data.\nError: {e}")
                if curation_data.get("merges") is None:
                    curation_data["merges"] = []
                else:
                    # here we reset the merges for better formatting (str)
                    existing_merges = curation_data["merges"]
                    new_merges = []
                    for m in existing_merges:
                        if "unit_ids" not in m:
                            continue
                        if len(m["unit_ids"]) < 2:
                            continue
                        new_merges = add_merge(new_merges, m["unit_ids"])
                    curation_data["merges"] = new_merges
                if curation_data.get("splits") is None:
                    curation_data["splits"] = []
                if curation_data.get("removed") is None:
                    curation_data["removed"] = []
            elif self.analyzer.format == "binary_folder":
                json_file = self.analyzer.folder / "spikeinterface_gui" / "curation_data.json"
                if json_file.exists():
                    with open(json_file, "r") as f:
                        curation_data = json.load(f)
            elif self.analyzer.format == "zarr":
                import zarr
                zarr_root = zarr.open(self.analyzer.folder, mode='r')
                if "spikeinterface_gui" in zarr_root.keys() and "curation_data" in zarr_root["spikeinterface_gui"].attrs.keys():
                    curation_data = zarr_root["spikeinterface_gui"].attrs["curation_data"]
            if curation_data is None:
                curation_data = empty_curation_data.copy()
            self.curation_data = curation_data
            self.has_default_quality_labels = False
            if "label_definitions" not in self.curation_data:
                if label_definitions is not None:
                    self.curation_data["label_definitions"] = label_definitions
                else:
                    self.curation_data["label_definitions"] = default_label_definitions.copy()
            if "quality" in self.curation_data["label_definitions"]:
                curation_dict_quality_labels = self.curation_data["label_definitions"]["quality"]["label_options"]
                default_quality_labels = default_label_definitions["quality"]["label_options"]
                if set(curation_dict_quality_labels) == set(default_quality_labels):
                    if self.verbose:
                        print('Curation quality labels are the default ones')
                    self.has_default_quality_labels = True
def check_is_view_possible(self, view_name):
from .viewlist import possible_class_views
view_class = possible_class_views[view_name]
if view_class._depend_on is not None:
depencies_ok = all(self.has_extension(k) for k in view_class._depend_on)
if not depencies_ok:
if self.verbose:
print(view_name, 'does not have all dependencies', view_class._depend_on)
return False
return True
def declare_a_view(self, new_view):
assert new_view not in self.views, 'view already declared {}'.format(self)
self.views.append(new_view)
self.signal_handler.connect_view(new_view)
    @property
    def channel_ids(self):
        """Channel ids of the underlying analyzer."""
        return self.analyzer.channel_ids
    @property
    def unit_ids(self):
        """Unit ids of the underlying analyzer."""
        return self.analyzer.unit_ids
def get_time(self):
"""
Returns selected time and segment index
"""
segment_index = self.time_info['segment_index']
time_by_seg = self.time_info['time_by_seg']
time = time_by_seg[segment_index]
return time, segment_index
def set_time(self, time=None, segment_index=None):
"""
Set selected time and segment index.
If time is None, then the current time is used.
If segment_index is None, then the current segment index is used.
"""
if segment_index is not None:
self.time_info['segment_index'] = segment_index
else:
segment_index = self.time_info['segment_index']
if time is not None:
self.time_info['time_by_seg'][segment_index] = time
def update_time_info(self):
# set default time info
if self.main_settings["use_times"] and self.has_extension("recording"):
time_by_seg=np.array(
[
self.analyzer.recording.get_start_time(segment_index) for segment_index in range(self.num_segments)
],
dtype="float64"
)
else:
time_by_seg=np.array([0] * self.num_segments, dtype="float64")
if not hasattr(self, 'time_info'):
self.time_info = dict(
time_by_seg=time_by_seg,
segment_index=0
)
else:
self.time_info['time_by_seg'] = time_by_seg
def get_t_start_t_stop(self):
segment_index = self.time_info["segment_index"]
if self.main_settings["use_times"] and self.has_extension("recording"):
t_start = self.analyzer.recording.get_start_time(segment_index=segment_index)
t_stop = self.analyzer.recording.get_end_time(segment_index=segment_index)
return t_start, t_stop
else:
return 0, self.get_num_samples(segment_index) / self.sampling_frequency
def get_times_chunk(self, segment_index, t1, t2):
ind1, ind2 = self.get_chunk_indices(t1, t2, segment_index)
if self.main_settings["use_times"]:
recording = self.analyzer.recording
times_chunk = recording.get_times(segment_index=segment_index)[ind1:ind2]
else:
times_chunk = np.arange(ind2 - ind1, dtype='float64') / self.sampling_frequency + max(t1, 0)
return times_chunk
def get_chunk_indices(self, t1, t2, segment_index):
if self.main_settings["use_times"]:
recording = self.analyzer.recording
ind1, ind2 = recording.time_to_sample_index([t1, t2], segment_index=segment_index)
else:
t_start = 0.0
sr = self.sampling_frequency
ind1 = int((t1 - t_start) * sr)
ind2 = int((t2 - t_start) * sr)
ind1 = max(0, ind1)
ind2 = min(self.get_num_samples(segment_index), ind2)
return ind1, ind2
def sample_index_to_time(self, sample_index):
segment_index = self.time_info["segment_index"]
if self.main_settings["use_times"] and self.has_extension("recording"):
time = self.analyzer.recording.sample_index_to_time(sample_index, segment_index=segment_index)
return time
else:
return sample_index / self.sampling_frequency
def time_to_sample_index(self, time):
segment_index = self.time_info["segment_index"]
if self.main_settings["use_times"] and self.has_extension("recording"):
time = self.analyzer.recording.time_to_sample_index(time, segment_index=segment_index)
return time
else:
return int(time * self.sampling_frequency)
def get_information_txt(self):
nseg = self.analyzer.get_num_segments()
nchan = self.analyzer.get_num_channels()
nunits = self.analyzer.get_num_units()
txt = f"{nchan} channels - {nunits} units - {nseg} segments - {self.analyzer.format}\n"
txt += f"Loaded {len(self.analyzer.extensions)} extensions"
return txt
def refresh_colors(self):
if self.backend == "qt":
self._cached_qcolors = {}
elif self.backend == "panel":
pass
if self.main_settings['color_mode'] == 'color_by_unit':
self.colors = get_unit_colors(self.analyzer.sorting, color_engine='matplotlib', map_name='gist_ncar',
shuffle=True, seed=42)
elif self.main_settings['color_mode'] == 'color_only_visible':
unit_colors = get_unit_colors(self.analyzer.sorting, color_engine='matplotlib', map_name='gist_ncar',
shuffle=True, seed=42)
self.colors = {unit_id: (0.3, 0.3, 0.3, 1.) for unit_id in self.unit_ids}
for unit_id in self.get_visible_unit_ids():
self.colors[unit_id] = unit_colors[unit_id]
elif self.main_settings['color_mode'] == 'color_by_visibility':
self.colors = {unit_id: (0.3, 0.3, 0.3, 1.) for unit_id in self.unit_ids}
import matplotlib.pyplot as plt
cmap = plt.colormaps['tab10']
for i, unit_id in enumerate(self.get_visible_unit_ids()):
self.colors[unit_id] = cmap(i)
def get_unit_color(self, unit_id):
# scalar unit_id -> color html or QtColor
return self.colors[unit_id]
def get_spike_colors(self, unit_indices):
# array[unit_ind] -> array[color html or QtColor]
colors = np.zeros((unit_indices.size, 4), dtype="uint8")
unit_inds = np.unique(unit_indices)
for unit_ind in unit_inds:
unit_id = self.unit_ids[unit_ind]
mask = unit_indices == unit_ind
colors[mask] = np.array(self.get_unit_color(unit_id)) * 255
return colors
def get_extremum_channel(self, unit_id):
chan_ind = self._extremum_channel[unit_id]
return chan_ind
# unit visibility zone
def set_visible_unit_ids(self, visible_unit_ids):
"""Make visible some units, all other off"""
lim = self.main_settings['max_visible_units']
if len(visible_unit_ids) > lim:
visible_unit_ids = visible_unit_ids[:lim]
self._visible_unit_ids = list(visible_unit_ids)
    def get_visible_unit_ids(self):
        """Get list of visible unit_ids (the internal list itself, not a copy)."""
        return self._visible_unit_ids
def get_visible_unit_indices(self):
"""Get list of indicies of visible units"""
unit_ids = list(self.unit_ids)
visible_unit_indices = [unit_ids.index(u) for u in self._visible_unit_ids]
return visible_unit_indices
    def set_all_unit_visibility_off(self):
        """Hide all units."""
        self._visible_unit_ids = []
def iter_visible_units(self):
"""For looping over unit_ind and unit_id"""
visible_unit_indices = self.get_visible_unit_indices()
visible_unit_ids = self._visible_unit_ids
return zip(visible_unit_indices, visible_unit_ids)
def set_unit_visibility(self, unit_id, state):
"""Change the visibility of on unit, other are unchanged"""
if state and not(unit_id in self._visible_unit_ids):
self._visible_unit_ids.append(unit_id)
elif not state and unit_id in self._visible_unit_ids:
self._visible_unit_ids.remove(unit_id)
    def get_unit_visibility(self, unit_id):
        """Get the visibility of one unit."""
        return unit_id in self._visible_unit_ids
def get_units_visibility_mask(self):
"""Get bool mask of visibility"""
mask = np.zeros(self.unit_ids.size, dtype='bool')
mask[self.get_visible_unit_indices()] = True
return mask
def get_dict_unit_visible(self):
"""Construct the visibility dict keys are unit_ids, previous behavior"""
dict_unit_visible = {u:False for u in self.unit_ids}
for u in self.get_visible_unit_ids():
dict_unit_visible[u] = True
return dict_unit_visible
## end unit visibility zone
def update_visible_spikes(self):
inds = []
for unit_index, unit_id in self.iter_visible_units():
inds.append(self._spike_index_by_units[unit_id])
if len(inds) > 0:
inds = np.concatenate(inds)
inds = np.sort(inds)
else:
inds = np.array([], dtype='int64')
self._spike_visible_indices = inds
self._spike_selected_indices = np.array([], dtype='int64')
    def get_indices_spike_visible(self):
        """Flat indices (into self.spikes) of spikes belonging to visible units."""
        return self._spike_visible_indices
    def get_indices_spike_selected(self):
        """Flat indices (into self.spikes) of the currently selected spikes."""
        return self._spike_selected_indices
def set_indices_spike_selected(self, inds):
self._spike_selected_indices = np.array(inds)
# reset active split if needed
if len(self._spike_selected_indices) == 1:
# set time info
segment_index = self.spikes['segment_index'][self._spike_selected_indices[0]]
sample_index = self.spikes['sample_index'][self._spike_selected_indices[0]]
self.set_time(time=self.sample_index_to_time(sample_index), segment_index=segment_index)
def get_spike_indices(self, unit_id, segment_index=None):
if segment_index is None:
# dict[unit_id] -> all indices for this unit across segments
return self._spike_index_by_units[unit_id]
else:
# dict[segment_index][unit_id] -> all indices for this unit for one segment
return self._spike_index_by_segment_and_units[segment_index][unit_id]
    def get_num_samples(self, segment_index):
        """Number of samples in one segment (delegates to the analyzer)."""
        return self.analyzer.get_num_samples(segment_index=segment_index)
def get_traces(self, trace_source='preprocessed', **kargs):
# assert trace_source in ['preprocessed', 'raw']
assert trace_source in ['preprocessed']
cache_key = (kargs.get("segment_index", None), kargs.get("start_frame", None), kargs.get("end_frame", None))
if cache_key in self._traces_cached:
return self._traces_cached[cache_key]
else:
# check if start_frame and end_frame are a subset interval of a cached one
for cached_key in self._traces_cached.keys():
cached_seg = cached_key[0]
cached_start = cached_key[1]
cached_end = cached_key[2]
req_seg = kargs.get("segment_index", None)
req_start = kargs.get("start_frame", None)
req_end = kargs.get("end_frame", None)
if cached_seg is not None and req_seg is not None:
if cached_seg != req_seg:
continue
if cached_start is not None and cached_end is not None and req_start is not None and req_end is not None:
if req_start >= cached_start and req_end <= cached_end:
# subset found
traces = self._traces_cached[cached_key]
start_offset = req_start - cached_start
end_offset = req_end - cached_start
return traces[start_offset:end_offset, :]
if len(self._traces_cached) > 4:
self._traces_cached.pop(list(self._traces_cached.keys())[0])
if trace_source == 'preprocessed':
rec = self.analyzer.recording
elif trace_source == 'raw':
raise NotImplementedError("Raw traces not implemented yet")
# TODO get with parent recording the non process recording
kargs['return_in_uV'] = self.return_in_uV
traces = rec.get_traces(**kargs)
# put in cache for next call
self._traces_cached[cache_key] = traces
return traces
    def get_contact_location(self):
        """Channel (contact) locations from the analyzer."""
        location = self.analyzer.get_channel_locations()
        return location
    def get_waveform_sweep(self):
        """(nbefore, nafter) sample counts of the waveform window around the peak."""
        return self.nbefore, self.nafter
    def get_waveforms_range(self):
        """Global (min, max) over the average templates (NaN-safe)."""
        return np.nanmin(self.templates_average), np.nanmax(self.templates_average)
def get_waveforms(self, unit_id, force_dense=False):
wfs = self.waveforms_ext.get_waveforms_one_unit(unit_id, force_dense=force_dense)
if self.analyzer.sparsity is None or force_dense:
# dense waveforms
chan_inds = np.arange(self.analyzer.get_num_channels(), dtype='int64')
else:
# sparse waveforms
chan_inds = self.analyzer.sparsity.unit_id_to_channel_indices[unit_id]
return wfs, chan_inds
def get_common_sparse_channels(self, unit_ids):
sparsity_mask = self.get_sparsity_mask()
unit_indexes = [list(self.unit_ids).index(u) for u in unit_ids]
chan_inds, = np.nonzero(sparsity_mask[unit_indexes, :].sum(axis=0))
return chan_inds
def get_intersect_sparse_channels(self, unit_ids):
sparsity_mask = self.get_sparsity_mask()
unit_indexes = [list(self.unit_ids).index(u) for u in unit_ids]
chan_inds, = np.nonzero(sparsity_mask[unit_indexes, :].sum(axis=0) == len(unit_ids))
return chan_inds
    def get_probegroup(self):
        """Probe group of the analyzer."""
        return self.analyzer.get_probegroup()
def set_channel_visibility(self, visible_channel_inds):
self.visible_channel_inds = np.array(visible_channel_inds, copy=True)
def has_extension(self, extension_name):
if extension_name == 'recording':
return self.analyzer.has_recording() or self.analyzer.has_temporary_recording()
else:
# extension needs to be loaded
if extension_name in self.skip_extensions:
return False
else:
return extension_name in self.analyzer.extensions
    def handle_metrics(self):
        """True when quality metrics were loaded at init."""
        return self.metrics is not None
    def get_units_table(self):
        """Units table built at init.

        NOTE(review): this method is defined twice in the file with identical
        bodies; the later definition shadows this one — remove one of them.
        """
        return self.units_table
def get_all_pcs(self):
if self._pc_projections is None and self.pc_ext is not None:
self._pc_projections, self._pc_indices = self.pc_ext.get_some_projections(
channel_ids=self.analyzer.channel_ids,
unit_ids=self.analyzer.unit_ids
)
return self._pc_indices, self._pc_projections
else:
return None, None
def get_sparsity_mask(self):
if self.external_sparsity is not None:
return self.external_sparsity.mask
else:
return self.analyzer_sparsity.mask
def get_similarity(self, method=None):
if method is None and len(self._similarity_by_method) == 1:
method = list(self._similarity_by_method.keys())[0]
similarity = self._similarity_by_method.get(method, None)
return similarity
def compute_similarity(self, method='l1'):
# have internal cache
if method in self._similarity_by_method:
return self._similarity_by_method[method]
ext = self.analyzer.compute("template_similarity", method=method, save=self.save_on_compute)
self._similarity_by_method[method] = ext.get_data()
return self._similarity_by_method[method]
    def compute_unit_positions(self, method, method_kwargs):
        """Recompute the 'unit_locations' extension and refresh the cached 2D positions."""
        ext = self.analyzer.compute_one_extension('unit_locations', save=self.save_on_compute, method=method, **method_kwargs)
        # 2D only
        self.unit_positions = ext.get_data()[:, :2]
    def get_correlograms(self):
        """Cached (correlograms, bins) pair, possibly (None, None)."""
        return self.correlograms, self.correlograms_bins
    def compute_correlograms(self, window_ms, bin_ms):
        """Recompute correlograms with the given window/bin sizes and cache them."""
        ext = self.analyzer.compute("correlograms", save=self.save_on_compute, window_ms=window_ms, bin_ms=bin_ms)
        self.correlograms, self.correlograms_bins = ext.get_data()
        return self.correlograms, self.correlograms_bins
    def get_isi_histograms(self):
        """Cached (isi_histograms, bins) pair, possibly (None, None)."""
        return self.isi_histograms, self.isi_bins
    def compute_isi_histograms(self, window_ms, bin_ms):
        """Recompute ISI histograms with the given window/bin sizes and cache them."""
        ext = self.analyzer.compute("isi_histograms", save=self.save_on_compute, window_ms=window_ms, bin_ms=bin_ms)
        self.isi_histograms, self.isi_bins = ext.get_data()
        return self.isi_histograms, self.isi_bins
    def get_units_table(self):
        """Units table built at init.

        NOTE(review): duplicate definition (also defined earlier in this class
        with an identical body); this one shadows the earlier one.
        """
        return self.units_table
    def compute_auto_merge(self, **params):
        """Suggest unit merges via spikeinterface's compute_merge_unit_groups.

        Returns (merge_unit_groups, extra); the merge graph is not resolved.
        """
        from spikeinterface.curation import compute_merge_unit_groups
        merge_unit_groups, extra = compute_merge_unit_groups(
            self.analyzer,
            extra_outputs=True,
            resolve_graph=False,
            **params
        )
        return merge_unit_groups, extra
def curation_can_be_saved(self):
return self.analyzer.format != "memory"
def construct_final_curation(self):
d = dict()
d["format_version"] = "2"
d["unit_ids"] = self.unit_ids.tolist()
d.update(self.curation_data.copy())
model = CurationModel(**d)
return model
    def save_curation_in_analyzer(self):
        """Persist the current curation into the analyzer folder.

        binary_folder: writes spikeinterface_gui/curation_data.json.
        zarr: stores the dict in the 'spikeinterface_gui' group attrs.
        memory: cannot be saved, a message is printed instead.
        """
        if self.analyzer.format == "memory":
            print("Analyzer is an in-memory object. Cannot save curation file in it.")
            pass
        elif self.analyzer.format == "binary_folder":
            folder = self.analyzer.folder / "spikeinterface_gui"
            folder.mkdir(exist_ok=True, parents=True)
            json_file = folder / f"curation_data.json"
            curation_model = self.construct_final_curation()
            with open(json_file, "w") as f:
                f.write(curation_model.model_dump_json(indent=4))
            self.current_curation_saved = True
        elif self.analyzer.format == "zarr":
            import zarr
            zarr_root = zarr.open(self.analyzer.folder, mode='r+')
            # create the group on first save, then always re-fetch it
            if "spikeinterface_gui" not in zarr_root.keys():
                sigui_group = zarr_root.create_group("spikeinterface_gui", overwrite=True)
            sigui_group = zarr_root["spikeinterface_gui"]
            curation_model = self.construct_final_curation()
            sigui_group.attrs["curation_data"] = curation_model.model_dump(mode="json")
            self.current_curation_saved = True
def get_split_unit_ids(self):
    """List the unit ids that currently have a pending split (empty if curation is off)."""
    if not self.curation:
        return []
    return [split["unit_id"] for split in self.curation_data["splits"]]
def make_manual_delete_if_possible(self, removed_unit_ids):
    """
    Mark units as removed in the curation data.

    The operation is atomic: if any requested unit is already removed, or is
    part of a pending merge or split, nothing is changed and False is
    returned. (Previously, units were appended one by one inside the
    validation loop, so a failure mid-list left the curation data partially
    modified while still returning False.)
    """
    if not self.curation:
        return False
    all_merged_units = sum([m["unit_ids"] for m in self.curation_data["merges"]], [])
    all_split_units = [s["unit_id"] for s in self.curation_data["splits"]]
    # validate everything first so a refusal leaves curation_data untouched
    for unit_id in removed_unit_ids:
        if unit_id in self.curation_data["removed"]:
            return False
        if unit_id in all_merged_units:
            return False
        if unit_id in all_split_units:
            return False
    for unit_id in removed_unit_ids:
        self.curation_data["removed"].append(unit_id)
        if self.verbose:
            print(f"Unit {unit_id} is removed from the curation data")
    return True
def make_manual_restore(self, restore_unit_ids):
    """Undo manual deletions: drop the given unit ids from the 'removed' list."""
    if not self.curation:
        return
    removed = self.curation_data["removed"]
    for unit_id in restore_unit_ids:
        if unit_id not in removed:
            continue
        if self.verbose:
            print(f"Unit {unit_id} is restored from the curation data")
        removed.remove(unit_id)
def make_manual_merge_if_possible(self, merge_unit_ids):
    """
    Try to register a new merge group in the curation data.

    The merge is refused (returns False) when curation is disabled, fewer
    than two units are given, or any unit is already removed or part of a
    split. Groups overlapping an existing merge are combined by add_merge(),
    which resolves the connectivity graph.
    """
    if not self.curation:
        return False
    if len(merge_unit_ids) < 2:
        return False
    split_units = [s["unit_id"] for s in self.curation_data["splits"]]
    removed_units = self.curation_data["removed"]
    if any(u in removed_units or u in split_units for u in merge_unit_ids):
        return False
    self.curation_data["merges"] = add_merge(self.curation_data["merges"], merge_unit_ids)
    if self.verbose:
        print(f"Merged unit group: {[str(u) for u in merge_unit_ids]}")
    return True
def make_manual_split_if_possible(self, unit_id):
    """
    Try to register a split of `unit_id` from the currently selected spikes.

    Refused (returns False) when curation is disabled, the unit is removed or
    belongs to a merge group, the unit is not visible, no spikes are
    selected, or the selection contains spikes not belonging to the unit.
    A pre-existing split of the same unit is replaced.
    """
    if not self.curation:
        return False
    if unit_id in self.curation_data["removed"]:
        return False
    if any(unit_id in merge["unit_ids"] for merge in self.curation_data["merges"]):
        return False
    # a unit can carry only one split: drop any pre-existing one first
    for existing in self.curation_data["splits"]:
        if existing["unit_id"] == unit_id:
            if self.verbose:
                print(f"Unit {unit_id} is already split, removing existing split and replacing it")
            self.curation_data["splits"].remove(existing)
            break
    # the split source must be visible and have a non-empty, valid selection
    if unit_id not in self.get_visible_unit_ids():
        return False
    selected = self.get_indices_spike_selected()
    if len(selected) == 0:
        return False
    spike_inds = self.get_spike_indices(unit_id, segment_index=None)
    if not np.all(np.isin(selected, spike_inds)):
        return False
    # re-express absolute spike indices relative to this unit's spike train
    local_indices = [np.where(spike_inds == ind)[0][0] for ind in selected]
    self.curation_data["splits"].append(
        {"unit_id": unit_id, "mode": "indices", "indices": [local_indices]}
    )
    if self.verbose:
        print(f"Split unit {unit_id} with {len(local_indices)} spikes")
    return True
def make_manual_restore_merge(self, merge_indices):
    """Drop the merge groups at the given positions from the curation data."""
    if not self.curation:
        return
    kept = [
        merge
        for index, merge in enumerate(self.curation_data["merges"])
        if index not in merge_indices
    ]
    self.curation_data["merges"] = kept
def make_manual_restore_split(self, split_indices):
    """Drop the splits at the given positions from the curation data."""
    if not self.curation:
        return
    kept = [
        split
        for index, split in enumerate(self.curation_data["splits"])
        if index not in split_indices
    ]
    self.curation_data["splits"] = kept
def get_curation_label_definitions(self):
    """Return copies of only the label definitions flagged as exclusive."""
    return {
        name: definition.copy()
        for name, definition in self.curation_data["label_definitions"].items()
        if definition['exclusive']
    }
def find_unit_in_manual_labels(self, unit_id):
    """Return the index of `unit_id` in the manual labels, or None if absent."""
    return next(
        (
            index
            for index, label in enumerate(self.curation_data["manual_labels"])
            if label["unit_id"] == unit_id
        ),
        None,
    )
def get_unit_label(self, unit_id, category):
    """Return the first label of `category` for `unit_id`, or None if unset."""
    ix = self.find_unit_in_manual_labels(unit_id)
    if ix is None:
        return None
    entry = self.curation_data["manual_labels"][ix]
    if "labels" in entry and category in entry["labels"]:
        # v2 curation format: categories nested under a "labels" dict
        return entry["labels"][category][0]
    if category in entry:
        # v1 curation format: category stored directly on the entry
        return entry[category][0]
    return None
def set_label_to_unit(self, unit_id, category, label):
if label is None:
self.remove_category_from_unit(unit_id, category)
return
ix = self.find_unit_in_manual_labels(unit_id)
if ix is not None:
lbl = self.curation_data["manual_labels"][ix]
if "labels" in lbl and category in lbl["labels"]: