11Lightning Module API
22====================
33
4- .. currentmodule :: connectomics.training.lit
4+ .. currentmodule:: connectomics.training.lightning
55
66PyTorch Lightning integration for training orchestration and distributed computing.
77
@@ -20,21 +20,21 @@ Quick Example
2020.. code-block :: python
2121
2222 from connectomics.config import load_config
23- from connectomics.training.lit import (
23+ from connectomics.training.lightning import (
2424 ConnectomicsModule,
25- ConnectomicsDataModule ,
25+ create_datamodule,
2626 create_trainer
2727 )
2828 from pytorch_lightning import seed_everything
2929
3030 # Load config
31- cfg = load_config(" tutorials/lucchi .yaml" )
31+ cfg = load_config("tutorials/minimal.yaml")
3232
3333 # Set seed
3434 seed_everything(cfg.system.seed)
3535
3636 # Create components
37- datamodule = ConnectomicsDataModule (cfg)
37+ datamodule = create_datamodule(cfg)
3838 model = ConnectomicsModule(cfg)
3939 trainer = create_trainer(cfg)
4040
@@ -50,7 +50,7 @@ Module Reference
5050ConnectomicsModule
5151^^^^^^^^^^^^^^^^^^
5252
53- .. autoclass :: connectomics.training.lit .ConnectomicsModule
53+ .. autoclass:: connectomics.training.lightning.ConnectomicsModule
5454 :members:
5555 :undoc-members:
5656 :show-inheritance:
@@ -72,9 +72,9 @@ ConnectomicsModule
7272 .. code-block :: python
7373
7474 from connectomics.config import load_config
75- from connectomics.training.lit import ConnectomicsModule
75+ from connectomics.training.lightning import ConnectomicsModule
7676
77- cfg = load_config(" tutorials/lucchi .yaml" )
77+ cfg = load_config("tutorials/minimal.yaml")
7878 model = ConnectomicsModule(cfg)
7979
8080 # Access underlying model
@@ -88,7 +88,7 @@ ConnectomicsModule
8888 .. code-block :: python
8989
9090 import torch.nn as nn
91- from connectomics.training.lit import ConnectomicsModule
91+ from connectomics.training.lightning import ConnectomicsModule
9292
9393 class MyModel(nn.Module):
9494 def __init__(self):
@@ -104,7 +104,7 @@ ConnectomicsModule
104104 ConnectomicsDataModule
105105^^^^^^^^^^^^^^^^^^^^^^
106106
107- .. autoclass :: connectomics.training.lit .ConnectomicsDataModule
107+ .. autoclass:: connectomics.training.lightning.ConnectomicsDataModule
108108 :members:
109109 :undoc-members:
110110 :show-inheritance:
@@ -123,10 +123,10 @@ ConnectomicsDataModule
123123 .. code-block :: python
124124
125125 from connectomics.config import load_config
126- from connectomics.training.lit import ConnectomicsDataModule
126+ from connectomics.training.lightning import create_datamodule
127127
128- cfg = load_config(" tutorials/lucchi .yaml" )
129- datamodule = ConnectomicsDataModule (cfg)
128+ cfg = load_config("tutorials/minimal.yaml")
129+ datamodule = create_datamodule(cfg)
130130
131131 # Setup for training
132132 datamodule.setup('fit')
@@ -142,7 +142,7 @@ ConnectomicsDataModule
142142 create_trainer
143143^^^^^^^^^^^^^^
144144
145- .. autofunction :: connectomics.training.lit .create_trainer
145+ .. autofunction:: connectomics.training.lightning.create_trainer
146146
147147 Create PyTorch Lightning Trainer with appropriate callbacks.
148148
@@ -151,9 +151,9 @@ create_trainer
151151 .. code-block :: python
152152
153153 from connectomics.config import load_config
154- from connectomics.training.lit import create_trainer
154+ from connectomics.training.lightning import create_trainer
155155
156- cfg = load_config(" tutorials/lucchi .yaml" )
156+ cfg = load_config("tutorials/minimal.yaml")
157157 trainer = create_trainer(cfg)
158158
159159 # Access trainer properties
@@ -200,7 +200,7 @@ Enable mixed precision for faster training:
200200
201201.. code-block :: yaml
202202
203- training :
203+ optimization:
204204 precision: "16-mixed"  # FP16
205205 # or
206206 precision: "bf16-mixed"  # BFloat16 (Ampere+ GPUs)
@@ -212,7 +212,7 @@ Simulate larger batch sizes:
212212
213213.. code-block :: yaml
214214
215- training :
215+ optimization:
216216 accumulate_grad_batches: 4
217217
218218 Gradient Clipping
@@ -222,9 +222,8 @@ Prevent exploding gradients:
222222
223223.. code-block :: yaml
224224
225- training :
225+ optimization:
226226 gradient_clip_val: 1.0
227- gradient_clip_algorithm : " norm" # or "value"
228227
229228 Learning Rate Scheduling
230229^^^^^^^^^^^^^^^^^^^^^^^^
@@ -233,10 +232,11 @@ Automatic LR scheduling with warmup:
233232
234233.. code-block :: yaml
235234
236- scheduler :
237- name : CosineAnnealingLR
238- warmup_epochs : 5
239- min_lr : 1e-6
235+ optimization:
236+ scheduler:
237+ name: CosineAnnealingLR
238+ warmup_epochs: 5
239+ min_lr: 1e-6
240240
241241 Deep Supervision
242242^^^^^^^^^^^^^^^^
@@ -246,10 +246,11 @@ Multi-scale loss computation:
246246.. code-block :: yaml
247247
248248 model :
249- deep_supervision : true
250- loss_functions :
251- - DiceLoss
252- loss_weights : [1.0]
249+ loss:
250+ deep_supervision: true
251+ losses:
252+ - function: DiceLoss
253+ weight: 1.0
253254
254255 The module automatically:
255256
@@ -267,23 +268,26 @@ Model Checkpointing
267268
268269.. code-block :: yaml
269270
270- checkpoint :
271- monitor : " val/loss"
272- mode : " min"
273- save_top_k : 3
274- save_last : true
275- filename : " epoch{epoch:02d}-loss{val/loss:.2f}"
271+ monitor:
272+ checkpoint:
273+ monitor: "val/loss"
274+ mode: "min"
275+ save_top_k: 3
276+ save_last: true
277+ filename: "epoch{epoch:02d}-loss{val/loss:.2f}"
276278
277279 Early Stopping
278280^^^^^^^^^^^^^^
279281
280282.. code-block :: yaml
281283
282- early_stopping :
283- monitor : " val/loss"
284- patience : 10
285- mode : " min"
286- min_delta : 0.0
284+ monitor:
285+ early_stopping:
286+ enabled: true
287+ monitor: "val/loss"
288+ patience: 10
289+ mode: "min"
290+ min_delta: 0.0
287291
288292 Learning Rate Monitoring
289293^^^^^^^^^^^^^^^^^^^^^^^^
@@ -298,9 +302,10 @@ TensorBoard (Default)
298302
299303.. code-block :: yaml
300304
301- logging :
302- save_dir : " outputs"
303- log_every_n_steps : 10
305+ monitor:
306+ logging:
307+ scalar:
308+ loss_every_n_steps: 10
304309
305310 Logs are saved to ``outputs/lightning_logs/``.
306311
@@ -349,7 +354,7 @@ Custom Training Step
349354
350355.. code-block :: python
351356
352- from connectomics.training.lit import ConnectomicsModule
357+ from connectomics.training.lightning import ConnectomicsModule
353358
354359 class CustomModule (ConnectomicsModule ):
355360 def training_step (self , batch , batch_idx ):
@@ -398,7 +403,7 @@ Full Dataset Inference
398403 )
399404
400405 # Create datamodule
401- datamodule = ConnectomicsDataModule (cfg)
406+ datamodule = create_datamodule(cfg)
402407
403408 # Create trainer
404409 trainer = create_trainer(cfg)
@@ -424,7 +429,7 @@ Or from command line:
424429.. code-block :: bash
425430
426431 python scripts/main.py \
427- --config tutorials/lucchi .yaml \
432+ --config tutorials/minimal.yaml \
428433 --resume outputs/last.ckpt
429434
430435 See Also
0 commit comments