166 changes: 166 additions & 0 deletions examples/qwen3_5/example_config.yaml
@@ -0,0 +1,166 @@
trainer_type: fsdp2_trainer
dataset_config:
extra_kwargs: {}
dataset_type: qwen3_vl_iterable
dataset_format: yaml
processor_config:
processor_name: Qwen/Qwen3-VL-8B-Instruct
processor_type: qwen3_vl
dataset_path: data/video/debug.yaml
datasets: null
shuffle: true
eval_dataset_path: null
object_storage: none
bucket_name: null
packing: false
packing_strategy: first_fit
packing_length: 51200
filter_overlong: true
filter_overlong_workers: 8
max_length: null
video_sampling_strategy: fps
video_max_pixels: 50176
video_max_frames: 512
frame_num: 64
fps: 1
video_backend: qwen_vl_utils
trainer_args:
output_dir: ./output/qwen3_5_training
do_train: false
do_eval: false
do_predict: false
eval_strategy: 'no'
prediction_loss_only: false
per_device_train_batch_size: 1
per_device_eval_batch_size: 8
gradient_accumulation_steps: 1
eval_accumulation_steps: null
eval_delay: 0
torch_empty_cache_steps: null
learning_rate: 0.0002
weight_decay: 0.0
adam_beta1: 0.9
adam_beta2: 0.999
adam_epsilon: 1.0e-08
max_grad_norm: 1.0
num_train_epochs: 1
max_steps: 1000
lr_scheduler_type: cosine
lr_scheduler_kwargs: {}
warmup_ratio: 0.1
warmup_steps: 0
log_level: passive
log_level_replica: warning
log_on_each_node: true
logging_dir: ./output/qwen3_5_training/runs
logging_strategy: steps
logging_first_step: false
logging_steps: 1
logging_nan_inf_filter: true
save_strategy: steps
save_steps: 1000
save_total_limit: 1
save_on_each_node: false
save_only_model: false
restore_callback_states_from_checkpoint: false
use_cpu: false
seed: 42
data_seed: null
bf16: true
fp16: false
bf16_full_eval: false
fp16_full_eval: false
tf32: null
local_rank: 0
ddp_backend: null
debug: []
dataloader_drop_last: false
eval_steps: null
dataloader_num_workers: 0
dataloader_prefetch_factor: null
run_name: qwen3_5_debug
disable_tqdm: false
remove_unused_columns: true
label_names: null
load_best_model_at_end: false
metric_for_best_model: null
greater_is_better: null
ignore_data_skip: false
fsdp: []
fsdp_config:
transformer_layer_cls_to_wrap:
- Qwen3_5DecoderLayer
reshard_after_forward: false
min_num_params: 0
xla: false
xla_fsdp_v2: false
xla_fsdp_grad_ckpt: false
accelerator_config:
split_batches: false
dispatch_batches: null
even_batches: true
use_seedable_sampler: true
non_blocking: false
gradient_accumulation_kwargs: null
parallelism_config: null
deepspeed: null
label_smoothing_factor: 0.0
optim: adamw_torch_fused
optim_args: null
length_column_name: length
report_to: []
project: huggingface
trackio_space_id: trackio
ddp_find_unused_parameters: null
ddp_bucket_cap_mb: null
ddp_broadcast_buffers: null
dataloader_pin_memory: true
dataloader_persistent_workers: false
skip_memory_metrics: true
push_to_hub: false
resume_from_checkpoint: null
hub_model_id: null
hub_strategy: every_save
hub_token: <HUB_TOKEN>
hub_private_repo: null
hub_always_push: false
hub_revision: null
gradient_checkpointing: true
gradient_checkpointing_kwargs: null
include_for_metrics: []
eval_do_concat_batches: true
auto_find_batch_size: false
full_determinism: false
ddp_timeout: 1800
torch_compile: false
torch_compile_backend: null
torch_compile_mode: null
include_num_input_tokens_seen: 'no'
neftune_noise_alpha: null
optim_target_modules: null
batch_eval_metrics: false
eval_on_start: false
use_liger_kernel: true
liger_kernel_config: null
eval_use_gather_object: false
average_tokens_across_devices: true
use_muon: false
freeze_modules: null
use_rmpad: true
fsdp2: true
sp_ulysses_degree: 1
reduce_dtype: bfloat16
output_dtype: bfloat16
print_batch_input_steps: 5
enable_profiler: false
profiler_config:
start_step: 1
end_step: 3
model_config:
extra_kwargs: {}
load_from_pretrained_path: Qwen/Qwen3.5-VL-8B-Instruct
load_from_config: null
attn_implementation: flash_attention_2
overwrite_config: null
monkey_patch_kwargs: null
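As a quick sanity check, the example config can be loaded and the settings this PR hinges on (FSDP2, rmpad, liger) asserted before launching a run. A minimal sketch, assuming only PyYAML and the repo-relative path shown above; it does not touch any lmms_engine API:

```python
# Sketch: load and sanity-check the example config (assumes only PyYAML).
import yaml

with open("examples/qwen3_5/example_config.yaml") as f:
    cfg = yaml.safe_load(f)

args = cfg["trainer_args"]

# The combination this PR targets: FSDP2 sharding + padding-free (rmpad)
# attention + liger kernels.
assert args["fsdp2"] and args["use_rmpad"] and args["use_liger_kernel"]

# The FSDP wrapping policy must name the decoder layer class patched below.
wrap = args["fsdp_config"]["transformer_layer_cls_to_wrap"]
assert "Qwen3_5DecoderLayer" in wrap
```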
2 changes: 2 additions & 0 deletions src/lmms_engine/models/__init__.py
@@ -17,6 +17,7 @@
from .qwen2_5_vl import apply_liger_kernel_to_qwen2_5_vl
from .qwen2_audio import apply_liger_kernel_to_qwen2_audio
from .qwen3 import apply_liger_kernel_to_qwen3
from .qwen3_5 import apply_liger_kernel_to_qwen3_5
from .qwen3_moe import apply_liger_kernel_to_qwen3_moe
from .qwen3_omni_moe import (
Qwen3OmniMoeThinkerConfig,
@@ -48,6 +49,7 @@
"apply_liger_kernel_to_qwen2_5_omni",
"apply_liger_kernel_to_qwen2_5_vl",
"apply_liger_kernel_to_qwen2_audio",
"apply_liger_kernel_to_qwen3_5",
"apply_liger_kernel_to_qwen3_vl",
"apply_liger_kernel_to_qwen3_vl_moe",
"apply_liger_kernel_to_qwen3_moe",
3 changes: 3 additions & 0 deletions src/lmms_engine/models/qwen3_5/__init__.py
@@ -0,0 +1,3 @@
from .monkey_patch import apply_liger_kernel_to_qwen3_5

__all__ = ["apply_liger_kernel_to_qwen3_5"]
106 changes: 106 additions & 0 deletions src/lmms_engine/models/qwen3_5/monkey_patch.py
@@ -0,0 +1,106 @@
from functools import partial, wraps

try:
from liger_kernel.transformers.cross_entropy import LigerCrossEntropyLoss
from liger_kernel.transformers.monkey_patch import (
_patch_rms_norm_module,
_patch_swiglu_module,
)
from liger_kernel.transformers.rms_norm import LigerRMSNorm
from liger_kernel.transformers.rope import liger_rotary_pos_emb
from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
except ImportError:
    print("liger-kernel is not installed; install it with `pip install liger-kernel`")

from loguru import logger
from transformers import PreTrainedModel

from lmms_engine.models.monkey_patch import MONKEY_PATCHER


@MONKEY_PATCHER.register("qwen3_5_text", "liger")
def apply_liger_kernel_to_qwen3_5(
rope: bool = True,
cross_entropy: bool = False,
fused_linear_cross_entropy: bool = True,
rms_norm: bool = True,
swiglu: bool = True,
model: PreTrainedModel = None,
use_rmpad: bool = False,
) -> None:
assert not (
cross_entropy and fused_linear_cross_entropy
), "cross_entropy and fused_linear_cross_entropy cannot both be True."

from transformers.models.qwen3_5 import modeling_qwen3_5

if rope:
modeling_qwen3_5.apply_rotary_pos_emb = liger_rotary_pos_emb
    if rms_norm:
        modeling_qwen3_5.Qwen3_5RMSNorm = LigerRMSNorm
    if cross_entropy:
        # NOTE: assumed to mirror liger-kernel's older qwen patches; verify
        # that modeling_qwen3_5 resolves CrossEntropyLoss at module level.
        modeling_qwen3_5.CrossEntropyLoss = LigerCrossEntropyLoss

if fused_linear_cross_entropy:
from .qwen3_5_liger import qwen3_5_lce_forward

if use_rmpad:

def wrap_forward(func):
@wraps(func)
def wrapper(*args, **kwargs):
                    return func(*args, use_rmpad=use_rmpad, **kwargs)

return wrapper

qwen3_5_lce_forward = wrap_forward(qwen3_5_lce_forward)
modeling_qwen3_5.Qwen3_5ForCausalLM.forward = qwen3_5_lce_forward

if swiglu:
modeling_qwen3_5.Qwen3_5MLP = LigerSwiGLUMLP

if use_rmpad:
from .qwen3_5_ops import attn_forward as qwen3_5_ops_attn_forward
from .qwen3_5_ops import (
decoder_layer_forward as qwen3_5_ops_decoder_layer_forward,
)
from .qwen3_5_ops import model_forward as qwen3_5_ops_model_forward

modeling_qwen3_5.Qwen3_5TextModel.forward = qwen3_5_ops_model_forward
modeling_qwen3_5.Qwen3_5DecoderLayer.forward = qwen3_5_ops_decoder_layer_forward
modeling_qwen3_5.Qwen3_5Attention.forward = qwen3_5_ops_attn_forward

if model is not None:
from transformers.models.qwen3_5.modeling_qwen3_5 import (
Qwen3_5ForCausalLM,
Qwen3_5TextModel,
)

if isinstance(model, Qwen3_5ForCausalLM):
base_model: Qwen3_5TextModel = model.model
elif isinstance(model, Qwen3_5TextModel):
base_model: Qwen3_5TextModel = model
elif hasattr(model, "language_model"):
base_model = getattr(
model.language_model,
model.language_model.base_model_prefix,
model.language_model,
)
else:
base_model = getattr(model, "model", model)

_patch_qwen3_5_rms_norm = partial(_patch_rms_norm_module, offset=1.0, casting_mode="llama")

if rms_norm:
_patch_qwen3_5_rms_norm(base_model.norm)

for decoder_layer in base_model.layers:
if swiglu:
_patch_swiglu_module(decoder_layer.mlp, LigerSwiGLUMLP)
if rms_norm:
_patch_qwen3_5_rms_norm(decoder_layer.input_layernorm)
_patch_qwen3_5_rms_norm(decoder_layer.post_attention_layernorm)
self_attn = getattr(decoder_layer, "self_attn", None)
if self_attn is not None:
if hasattr(self_attn, "q_norm") and self_attn.q_norm is not None:
_patch_qwen3_5_rms_norm(self_attn.q_norm)
if hasattr(self_attn, "k_norm") and self_attn.k_norm is not None:
_patch_qwen3_5_rms_norm(self_attn.k_norm)
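For completeness, a hedged usage sketch based only on the signature above: patch the module-level classes before instantiation so `from_pretrained` builds the liger modules, then pass the built model so instance-level norms and MLPs are patched in place. The checkpoint name is taken from the example config; whether `AutoModelForCausalLM` is the right auto class for that checkpoint is an assumption. (The function is also registered under `("qwen3_5_text", "liger")` via `MONKEY_PATCHER`, whose lookup API is not shown in this diff.)

```python
# Usage sketch for the new patch; see hedges in the lead-in and comments.
from transformers import AutoModelForCausalLM

from lmms_engine.models import apply_liger_kernel_to_qwen3_5

# 1) Patch class attributes (rope, RMSNorm, MLP, lce forward) before loading,
#    so the model is constructed from the liger implementations.
apply_liger_kernel_to_qwen3_5(use_rmpad=True)

# 2) Load the checkpoint (path from the example config; auto class assumed).
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen3.5-VL-8B-Instruct",
    attn_implementation="flash_attention_2",
)

# 3) Pass the instance so already-built submodules are patched in place
#    (final norm, per-layer layernorms, q_norm/k_norm, swiglu MLPs).
apply_liger_kernel_to_qwen3_5(model=model, use_rmpad=True)
```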