diff --git a/environments/environment-Linux.yml b/environments/environment-Linux.yml
index 0b14b3303..9b211d7fd 100644
--- a/environments/environment-Linux.yml
+++ b/environments/environment-Linux.yml
@@ -15,14 +15,14 @@ dependencies:
   - flask-cors == 4.0.0
   - flask_restx == 1.1.0
   - werkzeug < 3.0 # werkzeug 3.0 deprecates features used by flask 2.3.2. Remove this when updating flask.
-  - neuroconv[dandi,compressors] == 0.8.0
+  - neuroconv[dandi,compressors] == 0.8.1
   - dandi < 0.74.0 # 0.74.0 renamed dandi-staging to dandi-sandbox, breaking neuroconv 0.6.6
   - spikeinterface >= 0.101.0 # Previously included via neuroconv[ecephys]; needed for tutorial data generation
   - pandas < 3.0 # pandas 3.0 uses Arrow backend by default, returning read-only arrays that break spikeinterface Phy extractor
   - scikit-learn == 1.4.0 # Tutorial data generation
   - tqdm_publisher >= 0.0.1 # Progress bars
   - tzlocal >= 5.2 # Frontend timezone handling
-  - ndx-pose == 0.1.1
-  - nwbinspector == 0.6.2
+  - ndx-pose >= 0.1.1
+  - nwbinspector >= 0.6.2
   - tables
   - numcodecs < 0.16.0 # numcodecs 0.16.0 is not compatible with zarr 2.18.5
diff --git a/environments/environment-MAC-apple-silicon.yml b/environments/environment-MAC-apple-silicon.yml
index 5ee2e9513..5f17d82aa 100644
--- a/environments/environment-MAC-apple-silicon.yml
+++ b/environments/environment-MAC-apple-silicon.yml
@@ -23,13 +23,13 @@ dependencies:
   - werkzeug < 3.0 # werkzeug 3.0 deprecates features used by flask 2.3.2. Remove this when updating flask.
   # NOTE: the NeuroConv wheel on PyPI includes sonpy which is not compatible with arm64, so build and install
   # NeuroConv from GitHub, which will remove the sonpy dependency when building from Mac arm64
-  - neuroconv[dandi,compressors] == 0.8.0
+  - neuroconv[dandi,compressors] == 0.8.1
   - dandi < 0.74.0 # 0.74.0 renamed dandi-staging to dandi-sandbox, breaking neuroconv 0.6.6
   - spikeinterface >= 0.101.0 # Previously included via neuroconv[ecephys]; needed for tutorial data generation
   - pandas < 3.0 # pandas 3.0 uses Arrow backend by default, returning read-only arrays that break spikeinterface Phy extractor
   - scikit-learn == 1.4.0 # Tutorial data generation
   - tqdm_publisher >= 0.0.1 # Progress bars
   - tzlocal >= 5.2 # Frontend timezone handling
-  - ndx-pose == 0.1.1
-  - nwbinspector == 0.6.2
+  - ndx-pose >= 0.1.1
+  - nwbinspector >= 0.6.2
   - numcodecs < 0.16.0 # numcodecs 0.16.0 is not compatible with zarr 2.18.5
diff --git a/environments/environment-MAC-intel.yml b/environments/environment-MAC-intel.yml
index 4b77b869d..e925ef391 100644
--- a/environments/environment-MAC-intel.yml
+++ b/environments/environment-MAC-intel.yml
@@ -18,15 +18,15 @@ dependencies:
   - flask-cors == 4.0.0
   - flask_restx == 1.1.0
   - werkzeug < 3.0 # werkzeug 3.0 deprecates features used by flask 2.3.2. Remove this when updating flask.
-  - neuroconv[dandi,compressors] == 0.8.0
+  - neuroconv[dandi,compressors] == 0.8.1
   - dandi < 0.74.0 # 0.74.0 renamed dandi-staging to dandi-sandbox, breaking neuroconv 0.6.6
   - spikeinterface >= 0.101.0 # Previously included via neuroconv[ecephys]; needed for tutorial data generation
   - pandas < 3.0 # pandas 3.0 uses Arrow backend by default, returning read-only arrays that break spikeinterface Phy extractor
   - scikit-learn == 1.4.0 # Tutorial data generation
   - tqdm_publisher >= 0.0.1 # Progress bars
   - tzlocal >= 5.2 # Frontend timezone handling
-  - ndx-pose == 0.1.1
-  - nwbinspector == 0.6.2
+  - ndx-pose >= 0.1.1
+  - nwbinspector >= 0.6.2
   - numcodecs < 0.16.0 # numcodecs 0.16.0 is not compatible with zarr 2.18.5
   - h5py == 3.12.1 # 3.13.0 uses features in hdf5 1.14.4 that is not available in earlier hdf5 libs packaged
                    # with tables==3.9.1 (latest that can be used by neuroconv 0.6.0).
diff --git a/environments/environment-Windows.yml b/environments/environment-Windows.yml
index 605c00d86..838cecf50 100644
--- a/environments/environment-Windows.yml
+++ b/environments/environment-Windows.yml
@@ -18,14 +18,14 @@ dependencies:
   - flask-cors === 3.0.10
   - flask_restx == 1.1.0
   - werkzeug < 3.0 # werkzeug 3.0 deprecates features used by flask 2.3.2. Remove this when updating flask.
-  - neuroconv[dandi,compressors] == 0.8.0
+  - neuroconv[dandi,compressors] == 0.8.1
   - dandi < 0.74.0 # 0.74.0 renamed dandi-staging to dandi-sandbox, breaking neuroconv 0.6.6
   - spikeinterface >= 0.101.0 # Previously included via neuroconv[ecephys]; needed for tutorial data generation
   - pandas < 3.0 # pandas 3.0 uses Arrow backend by default, returning read-only arrays that break spikeinterface Phy extractor
   - scikit-learn == 1.4.0 # Tutorial data generation
   - tqdm_publisher >= 0.0.1 # Progress bars
   - tzlocal >= 5.2 # Frontend timezone handling
-  - ndx-pose == 0.1.1
-  - nwbinspector == 0.6.2
+  - ndx-pose >= 0.1.1
+  - nwbinspector >= 0.6.2
   - tables
   - numcodecs < 0.16.0 # numcodecs 0.16.0 is not compatible with zarr 2.18.5
diff --git a/src/pyflask/manageNeuroconv/manage_neuroconv.py b/src/pyflask/manageNeuroconv/manage_neuroconv.py
index 6247ce254..cd2f98403 100644
--- a/src/pyflask/manageNeuroconv/manage_neuroconv.py
+++ b/src/pyflask/manageNeuroconv/manage_neuroconv.py
@@ -610,6 +610,19 @@ def on_recording_interface(name, recording_interface):
             "additionalProperties": True,  # Allow for new columns
         }
 
+        # Ensure ElectrodeColumns includes entries for all Electrode schema properties
+        # (needed for frontend linked-table validation in neuroconv >= 0.7.5)
+        existing_electrode_columns = ecephys_metadata.setdefault("ElectrodeColumns", [])
+        existing_ecol_names = {col["name"] for col in existing_electrode_columns}
+        for prop_name, prop_info in new_electrodes_properties.items():
+            if prop_name not in existing_ecol_names:
+                existing_electrode_columns.append(
+                    {
+                        "name": prop_name,
+                        "description": prop_info.get("description", "No description."),
+                    }
+                )
+
     if has_units:
         unitprops_def = defs["UnitProperties"]
 
@@ -1380,7 +1393,7 @@ def upload_folder_to_dandi(
     return automatic_dandi_upload(
         dandiset_id=dandiset_id,
         nwb_folder_path=Path(nwb_folder_path),
-        staging=sandbox,  # Map sandbox parameter to staging for external API
+        sandbox=sandbox,
         cleanup=cleanup,
         number_of_jobs=number_of_jobs or 1,
         number_of_threads=number_of_threads or 1,
@@ -1414,7 +1427,7 @@ def upload_project_to_dandi(
     return automatic_dandi_upload(
         dandiset_id=dandiset_id,
         nwb_folder_path=CONVERSION_SAVE_FOLDER_PATH / project,  # Scope valid DANDI upload paths to GUIDE projects
-        staging=sandbox,  # Map sandbox parameter to staging for external API
+        sandbox=sandbox,
        cleanup=cleanup,
        number_of_jobs=number_of_jobs,
        number_of_threads=number_of_threads,
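
For reference, a minimal standalone sketch of the ElectrodeColumns backfill added in manage_neuroconv.py above. The sample input dicts are hypothetical stand-ins for the real ecephys_metadata and new_electrodes_properties values built earlier in the function (not shown in the hunk); only the loop itself mirrors the diff:

    # Hypothetical sample inputs; in the real function these are derived from the
    # NeuroConv metadata schema for the selected recording interface.
    ecephys_metadata = {
        "ElectrodeColumns": [{"name": "location", "description": "Brain area."}],
    }
    new_electrodes_properties = {
        "location": {"description": "Brain area."},
        "gain_to_uV": {},  # No description, so the fallback text is used below
    }

    # Same logic as the diff: append a column entry for every Electrode schema
    # property that lacks one, leaving existing entries untouched.
    existing_electrode_columns = ecephys_metadata.setdefault("ElectrodeColumns", [])
    existing_ecol_names = {col["name"] for col in existing_electrode_columns}
    for prop_name, prop_info in new_electrodes_properties.items():
        if prop_name not in existing_ecol_names:
            existing_electrode_columns.append(
                {
                    "name": prop_name,
                    "description": prop_info.get("description", "No description."),
                }
            )

    # "location" is kept as-is; only "gain_to_uV" is appended.
    assert [col["name"] for col in ecephys_metadata["ElectrodeColumns"]] == ["location", "gain_to_uV"]

Using setdefault rather than get with a default keeps the appended entries attached to ecephys_metadata even when the ElectrodeColumns key is initially absent.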