Commit d049fb6 (1 parent: 144365b)

Commit message: bugfix

2 files changed: 2 additions, 16 deletions

examples/flux/model_training/train.py

Lines changed: 1 addition, 8 deletions

@@ -119,11 +119,4 @@ def forward(self, data, inputs=None):
 )
 optimizer = torch.optim.AdamW(model.trainable_modules(), lr=args.learning_rate, weight_decay=args.weight_decay)
 scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
-launch_training_task(
-    dataset, model, model_logger, optimizer, scheduler,
-    num_epochs=args.num_epochs,
-    gradient_accumulation_steps=args.gradient_accumulation_steps,
-    save_steps=args.save_steps,
-    find_unused_parameters=args.find_unused_parameters,
-    num_workers=args.dataset_num_workers,
-)
+launch_training_task(dataset, model, model_logger, args=args)

examples/wanvideo/model_training/train.py

Lines changed: 1 addition, 8 deletions

@@ -128,11 +128,4 @@ def forward(self, data, inputs=None):
 )
 optimizer = torch.optim.AdamW(model.trainable_modules(), lr=args.learning_rate, weight_decay=args.weight_decay)
 scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
-launch_training_task(
-    dataset, model, model_logger, optimizer, scheduler,
-    num_epochs=args.num_epochs,
-    gradient_accumulation_steps=args.gradient_accumulation_steps,
-    save_steps=args.save_steps,
-    find_unused_parameters=args.find_unused_parameters,
-    num_workers=args.dataset_num_workers,
-)
+launch_training_task(dataset, model, model_logger, args=args)
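Both files receive the same one-line fix: instead of forwarding each training hyperparameter to launch_training_task individually, the scripts now hand over the whole parsed argument namespace with args=args. Note that the unchanged context lines still build an optimizer and scheduler locally even though they are no longer passed; this hunk does not show whether the launcher reuses or rebuilds them. Only the new call signature comes from this commit. The sketch below is a hypothetical reading of the pattern it implies, with the launcher deriving the optimizer, scheduler, loop, and checkpoint settings from args itself; model_logger.save and the loop body are illustrative assumptions, not the library's actual code.

import argparse

import torch


def launch_training_task(dataset, model, model_logger, args: argparse.Namespace):
    # Hypothetical sketch, not the library's real body: rebuild everything
    # the old call sites passed explicitly from the parsed `args`.
    optimizer = torch.optim.AdamW(
        model.trainable_modules(),  # trainable-parameter helper used by the example scripts
        lr=args.learning_rate,
        weight_decay=args.weight_decay,
    )
    scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
    dataloader = torch.utils.data.DataLoader(dataset, num_workers=args.dataset_num_workers)
    for _epoch in range(args.num_epochs):
        for step, batch in enumerate(dataloader):
            loss = model(batch)  # placeholder forward pass returning a scalar loss
            (loss / args.gradient_accumulation_steps).backward()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
            if (step + 1) % args.save_steps == 0:
                model_logger.save(model)  # assumed checkpoint hook on the logger

The upside of the args=args pattern is that a new hyperparameter (for example find_unused_parameters, which the old calls forwarded for distributed setup) can be added to the argument parser and consumed inside the launcher without editing every example script's call site.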
