Skip to content

Commit 3c20f7e

Browse files
Donglai Wei and claude committed
Remove legacy aliases and backward-compat shims from config schema
- Remove dead schema fields: rotate90_axes, val_split, train_resolution, max_angle, pad_size (SlidingWindowConfig), val_check_interval (ScalarLoggingConfig)
- Remove compute_bbox_all_2d/3d aliases (only compute_bbox_all is used)
- Fix misleading "legacy" comments on active fields (keep_input_on_cpu, rotation90_axes, MotionBlurConfig, val_check_interval on optimization)
- Change rotation90_axes type to Any to support "all" string value
- Auto-generate SQLite storage for Optuna studies when save_study=True
- Update 13 tutorial YAMLs to use canonical field names
- Includes linter auto-formatting across the codebase

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 4e2d895 commit 3c20f7e

151 files changed

Lines changed: 2764 additions & 2348 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

connectomics/config/__init__.py

Lines changed: 20 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -2,41 +2,40 @@
22
Modern Hydra-based configuration system for PyTorch Connectomics.
33
"""
44

5-
# New Hydra config system (primary)
6-
from .schema import Config
7-
from .pipeline.config_io import (
8-
load_config,
9-
save_config,
10-
merge_configs,
11-
update_from_cli,
12-
to_dict,
13-
from_dict,
14-
print_config,
15-
validate_config,
16-
get_config_hash,
17-
create_experiment_name,
18-
resolve_data_paths,
19-
)
20-
from .pipeline.stage_resolver import resolve_default_profiles
21-
from .pipeline.dict_utils import to_plain, as_plain_dict, cfg_get
22-
235
# Auto-configuration system
246
from .hardware.auto_config import (
25-
auto_plan_config,
267
AutoConfigPlanner,
278
AutoPlanResult,
9+
auto_plan_config,
2810
resolve_runtime_resource_sentinels,
2911
)
3012

3113
# GPU utilities
3214
from .hardware.gpu_utils import (
15+
estimate_gpu_memory_required,
3316
get_gpu_info,
17+
get_optimal_num_workers,
3418
print_gpu_info,
3519
suggest_batch_size,
36-
estimate_gpu_memory_required,
37-
get_optimal_num_workers,
3820
)
21+
from .pipeline.config_io import (
22+
create_experiment_name,
23+
from_dict,
24+
get_config_hash,
25+
load_config,
26+
merge_configs,
27+
print_config,
28+
resolve_data_paths,
29+
save_config,
30+
to_dict,
31+
update_from_cli,
32+
validate_config,
33+
)
34+
from .pipeline.dict_utils import as_plain_dict, cfg_get, to_plain
35+
from .pipeline.stage_resolver import resolve_default_profiles
3936

37+
# New Hydra config system (primary)
38+
from .schema import Config
4039

4140
__all__ = [
4241
# Hydra config system

connectomics/config/hardware/__init__.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,19 @@
11
"""GPU-aware auto-planning and SLURM cluster utilities."""
22

3-
from .auto_config import auto_plan_config, AutoConfigPlanner, AutoPlanResult, resolve_runtime_resource_sentinels
4-
from .gpu_utils import get_gpu_info, print_gpu_info, suggest_batch_size, estimate_gpu_memory_required, get_optimal_num_workers
53
from . import slurm_utils
4+
from .auto_config import (
5+
AutoConfigPlanner,
6+
AutoPlanResult,
7+
auto_plan_config,
8+
resolve_runtime_resource_sentinels,
9+
)
10+
from .gpu_utils import (
11+
estimate_gpu_memory_required,
12+
get_gpu_info,
13+
get_optimal_num_workers,
14+
print_gpu_info,
15+
suggest_batch_size,
16+
)
617

718
__all__ = [
819
"auto_plan_config",

connectomics/config/hardware/auto_config.py

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -11,25 +11,26 @@
1111
Users can manually override any auto-determined parameters.
1212
"""
1313

14-
1514
from __future__ import annotations
15+
1616
import logging
17-
import numpy as np
18-
from typing import Dict, List, Optional, Any
19-
from dataclasses import dataclass, field
20-
from omegaconf import OmegaConf, DictConfig
21-
import warnings
2217
import os
18+
import warnings
19+
from dataclasses import dataclass, field
20+
from typing import Any, Dict, List, Optional
2321

24-
logger = logging.getLogger(__name__)
22+
import numpy as np
23+
from omegaconf import DictConfig, OmegaConf
2524

2625
from .gpu_utils import (
26+
estimate_gpu_memory_required,
2727
get_gpu_info,
28-
suggest_batch_size,
2928
get_optimal_num_workers,
30-
estimate_gpu_memory_required,
29+
suggest_batch_size,
3130
)
3231

32+
logger = logging.getLogger(__name__)
33+
3334

3435
def _available_cpus_for_current_run() -> int:
3536
"""
@@ -120,7 +121,9 @@ def resolve_runtime_resource_sentinels(
120121
logger.info(
121122
"Auto-detected system.num_workers: -1 -> %d "
122123
"(available_cpus=%d, local_processes=%d)",
123-
config.system.num_workers, available_cpus, process_count,
124+
config.system.num_workers,
125+
available_cpus,
126+
process_count,
124127
)
125128

126129
if getattr(config.system, "num_gpus", 0) < -1:
@@ -477,7 +480,10 @@ def auto_plan_config(
477480
# Collect manual overrides (values explicitly set in config)
478481
manual_overrides = {}
479482
if hasattr(config, "data"):
480-
if hasattr(config.data, "dataloader") and getattr(config.data.dataloader, "batch_size", None) is not None:
483+
if (
484+
hasattr(config.data, "dataloader")
485+
and getattr(config.data.dataloader, "batch_size", None) is not None
486+
):
481487
manual_overrides["batch_size"] = config.data.dataloader.batch_size
482488
if hasattr(config, "system") and getattr(config.system, "num_workers", None) is not None:
483489
manual_overrides["num_workers"] = config.system.num_workers

connectomics/config/hardware/gpu_utils.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,13 +5,14 @@
55
and estimate memory requirements for training.
66
"""
77

8-
98
from __future__ import annotations
10-
import torch
11-
import psutil
9+
1210
import warnings
1311
from typing import Any, Dict, Tuple
1412

13+
import psutil
14+
import torch
15+
1516

1617
def get_gpu_info() -> Dict[str, Any]:
1718
"""

connectomics/config/hardware/slurm_utils.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,15 @@
11
"""SLURM cluster resource detection and management utilities."""
22

3-
43
from __future__ import annotations
5-
import subprocess
6-
import re
4+
75
import json
6+
import logging
7+
import re
8+
import subprocess
89
import time
10+
from dataclasses import asdict, dataclass
911
from pathlib import Path
10-
from dataclasses import dataclass, asdict
1112
from typing import Dict, List, Optional
12-
import logging
1313

1414
logger = logging.getLogger(__name__)
1515

connectomics/config/pipeline/__init__.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,20 @@
11
"""Core config loading, profile resolution, and stage merging."""
22

33
from .config_io import (
4+
create_experiment_name,
5+
from_dict,
6+
get_config_hash,
47
load_config,
5-
save_config,
68
merge_configs,
7-
update_from_cli,
8-
to_dict,
9-
from_dict,
109
print_config,
11-
validate_config,
12-
get_config_hash,
13-
create_experiment_name,
1410
resolve_data_paths,
11+
save_config,
12+
to_dict,
13+
update_from_cli,
14+
validate_config,
1515
)
16+
from .dict_utils import as_plain_dict, cfg_get, to_plain
1617
from .stage_resolver import resolve_default_profiles
17-
from .dict_utils import to_plain, as_plain_dict, cfg_get
1818

1919
__all__ = [
2020
"load_config",

connectomics/config/pipeline/config_io.py

Lines changed: 12 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,11 @@
88

99
import dataclasses
1010
import hashlib
11-
from pathlib import Path
12-
import warnings
1311
import os
1412
import re
13+
import warnings
1514
from glob import glob
15+
from pathlib import Path
1616
from typing import Any, Dict, List, Optional, Tuple, Union
1717

1818
from omegaconf import DictConfig, ListConfig, OmegaConf
@@ -24,7 +24,6 @@
2424
from .profile_engine import _YAML_PROFILE_ENGINE
2525
from .stage_resolver import _collect_explicit_paths
2626

27-
2827
# ---------------------------------------------------------------------------
2928
# Config loading helpers
3029
# ---------------------------------------------------------------------------
@@ -414,7 +413,9 @@ def _validate_cross_section_coherence(cfg: Config) -> None:
414413
context=f"inference.decoding[{i}].kwargs.{key}",
415414
)
416415
if min_channels is not None:
417-
required_channels.append((f"inference.decoding[{i}].kwargs.{key}", min_channels))
416+
required_channels.append(
417+
(f"inference.decoding[{i}].kwargs.{key}", min_channels)
418+
)
418419

419420
# 2d) TTA channel selectors
420421
tta_cfg = getattr(cfg.inference, "test_time_augmentation", None)
@@ -450,15 +451,14 @@ def _validate_cross_section_coherence(cfg: Config) -> None:
450451
context="inference.test_time_augmentation.select_channel",
451452
)
452453
if min_channels is not None:
453-
required_channels.append(
454-
("inference.test_time_augmentation.select_channel", min_channels)
455-
)
454+
required_channels.append(("inference.test_time_augmentation.select_channel", min_channels))
456455

457456
if required_channels:
458457
required_max = max(req for _, req in required_channels)
459458
if required_max > out_channels:
460459
details = ", ".join(
461-
f"{path} needs >= {req}" for path, req in sorted(required_channels, key=lambda x: x[1], reverse=True)
460+
f"{path} needs >= {req}"
461+
for path, req in sorted(required_channels, key=lambda x: x[1], reverse=True)
462462
)
463463
raise ValueError(
464464
"Cross-section validation failed: model.out_channels is "
@@ -467,7 +467,9 @@ def _validate_cross_section_coherence(cfg: Config) -> None:
467467
)
468468

469469
# 3) deep_supervision=True with architectures that don't support it
470-
deep_supervision = getattr(model_loss_cfg, "deep_supervision", False) if model_loss_cfg else False
470+
deep_supervision = (
471+
getattr(model_loss_cfg, "deep_supervision", False) if model_loss_cfg else False
472+
)
471473
if deep_supervision:
472474
arch_type = getattr(cfg.model.arch, "type", "")
473475
if not _architecture_supports_deep_supervision(arch_type):
@@ -561,6 +563,7 @@ def resolve_data_paths(cfg: Config) -> Config:
561563
>>> print(cfg.data.test.image)
562564
['/data/test/volume_1.tif', '/data/test/volume_2.tif']
563565
"""
566+
564567
def _combine_path(
565568
base_path: str, file_path: Optional[Union[str, List[str]]]
566569
) -> Optional[Union[str, List[str]]]:

0 commit comments

Comments
 (0)