---
# dllm_train_adam_multi_gpu_deepspeed.yaml
# Training configuration: dLLM trainer, Adam optimizer, multi-GPU via DeepSpeed.
# NOTE(review): the pasted source had all indentation stripped, which flattens
# every key to top level (parent keys parse as null). Nesting below is
# reconstructed from the evident schema; confirm `processor_config` is a
# top-level sibling of `dataset_config` against the consuming loader.
trainer_type: dllm_trainer

# Data pipeline: FineWeb-Edu streamed from the Hugging Face Hub, packed into
# fixed-length token sequences.
dataset_config:
  dataset_type: fineweb_edu
  dataset_format: hf_dataset
  dataset_path: HuggingFaceFW/fineweb-edu
  packing_length: 2048

# Text-only processing with the Qwen3 tokenizer.
processor_config:
  processor_name: "Qwen/Qwen3-0.6B"
  processor_type: "pure_text"

# Model is instantiated from this config rather than loaded from a checkpoint.
model_config:
  attn_implementation: sdpa
  load_from_config:
    model_type: qwen3_dllm
    config:
      vocab_size: 151936
      hidden_size: 1024
      intermediate_size: 4096
      num_hidden_layers: 24
      use_cache: false  # KV cache is irrelevant during training

# Hugging Face TrainingArguments-style settings.
trainer_args:
  per_device_train_batch_size: 32  # should be a multiple of world_size when split_batches is true
  learning_rate: 0.001
  weight_decay: 0.01
  gradient_accumulation_steps: 16
  gradient_checkpointing: true
  max_steps: 10000
  save_steps: 100
  save_total_limit: 1  # keep only the most recent checkpoint
  warmup_steps: 1000
  report_to: wandb
  output_dir: ./output/debug102
  run_name: lmms_engine_dllm_test_fineweb_edu
  eval_strategy: "no"  # quoted deliberately: bare `no` parses as boolean false
  logging_steps: 10
  seed: 42
  dataloader_num_workers: 4
  bf16: true
  fp16: false
  accelerator_config:
    split_batches: true
  lr_scheduler_type: cosine
  use_liger_kernel: false
  use_rmpad: true
  include_num_input_tokens_seen: true
  dataloader_drop_last: true
  sp_ulysses_degree: 1
  # NOTE(review): machine-specific absolute path — make it repo-relative or
  # parameterized before sharing this config.
  deepspeed: /home/libo/dllm/lmms-engine-mini-test/config/ds_config/default_config.json