Skip to content

Commit 8a4cfac

Browse files
committed
Add option to save the quantized checkpoint
1 parent fdad81a commit 8a4cfac

1 file changed

Lines changed: 3 additions & 1 deletion

File tree

examples/llm_ptq/hf_ptq.py

Lines changed: 3 additions & 1 deletion
Diff of examples/llm_ptq/hf_ptq.py:

@@ -772,7 +772,9 @@ def export_quantized(
         default_pad_token,
     ):
         if model_type == "qwen3omni":
-            print("Export of Qwen3Omni model is not supported yet")
+            print("Export of Qwen3Omni model is not supported yet. Saving .pt file instead.")
+            os.makedirs(os.path.dirname(args.export_path), exist_ok=True)
+            mto.save(model, args.export_path)
             return

         with torch.inference_mode():

0 commit comments

Comments (0)