Commit 59ce3f4

refactor: TinyMyo configuration and model for improved training efficiency and logging
1 parent f9ddfcb commit 59ce3f4

8 files changed

Lines changed: 290 additions & 557 deletions

config/experiment/TinyMyo_finetune.yaml

Lines changed: 9 additions & 16 deletions
@@ -32,7 +32,6 @@ finetune_pretrained: True
 resume: False
 
 layerwise_lr_decay: 0.90
-scheduler_type: cosine
 
 pretrained_checkpoint_path: null
 pretrained_safetensors_path: null
@@ -52,17 +51,10 @@ defaults:
   - override /task: finetune_task_TinyMyo
   - override /criterion: finetune_criterion
 
-masking:
-  patch_size: [1, 20]
-  masking_ratio: 0.50
-  unmasked_loss_coeff: 0.1
-
-input_normalization:
-  normalize: False
-
 model:
+  n_layer: 8
   num_classes: 6
-  classification_type: "ml"
+  task: "classification"
 
 trainer:
   accelerator: gpu
@@ -87,19 +79,20 @@ callbacks:
 
 optimizer:
   optim: 'AdamW'
-  lr: 5e-4
-  betas: [0.9, 0.98]
-  weight_decay: 0.01
+  lr: 1e-4
+  betas: [0.9, 0.999]
+  weight_decay: 1e-2
 
 scheduler:
   trainer: ${trainer}
-  min_lr: 1e-5
-  warmup_lr_init: 1e-5
+  min_lr: 5e-6
+  warmup_lr_init: 5e-6
   warmup_epochs: 5
   total_training_opt_steps: ${max_epochs}
   t_in_epochs: True
 
 wandb:
   entity: "TinyMyo"
   project: "TinyMyo"
-  save_dir: ${env:LOG_DIR}
+  save_dir: ${env:LOG_DIR}
+  offline: True
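
The finetune config keeps layerwise_lr_decay: 0.90 while dropping the base learning rate to 1e-4. As a minimal sketch of how layer-wise LR decay is typically wired into AdamW (assuming the transformer blocks are reachable as model.blocks; the actual TinyMyo parameter-grouping code is not part of this diff):

import torch
import torch.nn as nn

def layerwise_param_groups(model: nn.Module, base_lr: float = 1e-4,
                           decay: float = 0.90, weight_decay: float = 1e-2):
    """Scale each block's learning rate by decay**(distance from the last block)."""
    blocks = list(model.blocks)                 # hypothetical attribute name
    n = len(blocks)
    groups = []
    for i, block in enumerate(blocks):
        groups.append({
            "params": [p for p in block.parameters() if p.requires_grad],
            "lr": base_lr * decay ** (n - 1 - i),   # earlier blocks get a smaller lr
            "weight_decay": weight_decay,
        })
    return groups

# optimizer = torch.optim.AdamW(layerwise_param_groups(model), betas=(0.9, 0.999))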

config/experiment/TinyMyo_pretrain.yaml

Lines changed: 25 additions & 14 deletions
@@ -22,8 +22,8 @@ tag: EMG_pretrain
 gpus: -1
 num_nodes: 1
 num_workers: 8
-batch_size: 128
-max_epochs: 50
+batch_size: 512
+max_epochs: 30
 
 final_validate: True
 final_test: False
@@ -49,22 +49,23 @@ masking:
 input_normalization:
   normalize: True
 
-scheduler:
-  trainer: ${trainer}
-  min_lr: 1e-6
-  warmup_lr_init: 1e-6
-  warmup_epochs: 10
-  total_training_opt_steps: ${max_epochs}
-  t_in_epochs: True
+model:
+  n_layer: 8
+  drop_path: 0.0  # Stochastic depth disabled for pretraining
+  num_classes: 0  # No classification head for pretraining
+  task: pretraining
+
+criterion:
+  loss_type: 'smooth_l1'
 
 trainer:
   accelerator: gpu
   num_nodes: ${num_nodes}
   devices: ${gpus}
   strategy: auto
+  precision: "bf16-mixed"
   max_epochs: ${max_epochs}
-  gradient_clip_val: 3
-  accumulate_grad_batches: 8
+  gradient_clip_val: 1
 
 model_checkpoint:
   save_last: True
@@ -73,11 +74,21 @@ model_checkpoint:
   save_top_k: 1
 
 optimizer:
-  optim: 'AdamW'
-  lr: 1e-4
+  lr: 5e-4
   betas: [0.9, 0.98]
-  weight_decay: 0.01
+  weight_decay: 1e-2
+
+scheduler:
+  trainer: ${trainer}
+  min_lr: 1e-6
+  warmup_lr_init: 1e-6
+  warmup_epochs: 3
+  total_training_opt_steps: ${max_epochs}
+  t_in_epochs: True
 
 wandb:
+  entity: "TinyMyo"
   project: "TinyMyo"
   save_dir: ${env:LOG_DIR}
+  run_name: "TinyMyo-Pretraining"
+  offline: True
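
Two side effects of the pretraining changes are worth spelling out: the old run accumulated gradients over 128 * 8 = 1024 samples per optimizer step, whereas the new run takes one step per 512-sample batch, and the scheduler block now sits next to the optimizer with a shorter 3-epoch warmup. A rough sketch of how that scheduler block could be instantiated with timm's CosineLRScheduler follows (the use of timm here is an assumption; this commit does not show the scheduler factory):

import torch
import torch.nn as nn
from timm.scheduler import CosineLRScheduler

model = nn.Linear(16, 16)                       # stand-in for the TinyMyo encoder
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4,
                              betas=(0.9, 0.98), weight_decay=1e-2)

scheduler = CosineLRScheduler(
    optimizer,
    t_initial=30,         # total_training_opt_steps resolves to max_epochs
    lr_min=1e-6,          # min_lr
    warmup_t=3,           # warmup_epochs
    warmup_lr_init=1e-6,
    t_in_epochs=True,     # the schedule is indexed by epoch, not by update
)

for epoch in range(30):
    # ... one training epoch ...
    scheduler.step(epoch + 1)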

config/experiment/TinyssimoMyo_finetune.yaml

Lines changed: 0 additions & 111 deletions
This file was deleted.

config/experiment/TinyssimoMyo_pretrain.yaml

Lines changed: 0 additions & 87 deletions
This file was deleted.

0 commit comments
