@@ -88,6 +88,7 @@ def create(
8888 n_evals: int | None = 0,
8989 n_checkpoints: int | None = 1,
9090 batch_size: int | Literal["max"] = "max",
91+ gradient_accumulation_steps: int | None = None,
9192 learning_rate: float | None = 0.00001,
9293 lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
9394 min_lr_ratio: float = 0.0,
@@ -220,6 +221,7 @@ def create(
220221 n_evals=n_evals,
221222 n_checkpoints=n_checkpoints,
222223 batch_size=batch_size,
224+ gradient_accumulation_steps=gradient_accumulation_steps,
223225 learning_rate=learning_rate,
224226 lr_scheduler_type=lr_scheduler_type,
225227 min_lr_ratio=min_lr_ratio,
@@ -693,6 +695,7 @@ async def create(
693695 n_evals: int | None = 0,
694696 n_checkpoints: int | None = 1,
695697 batch_size: int | Literal["max"] = "max",
698+ gradient_accumulation_steps: int | None = None,
696699 learning_rate: float | None = 0.00001,
697700 lr_scheduler_type: Literal["linear", "cosine"] = "cosine",
698701 min_lr_ratio: float = 0.0,
@@ -824,6 +827,7 @@ async def create(
824827 n_evals=n_evals,
825828 n_checkpoints=n_checkpoints,
826829 batch_size=batch_size,
830+ gradient_accumulation_steps=gradient_accumulation_steps,
827831 learning_rate=learning_rate,
828832 lr_scheduler_type=lr_scheduler_type,
829833 min_lr_ratio=min_lr_ratio,
0 commit comments