We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent fdad81a · commit 8a4cfac (copy full SHA for 8a4cfac)
1 file changed
examples/llm_ptq/hf_ptq.py
@@ -772,7 +772,9 @@ def export_quantized(
772
default_pad_token,
773
):
774
if model_type == "qwen3omni":
775
- print("Export of Qwen3Omni model is not supported yet")
+ print("Export of Qwen3Omni model is not supported yet. Saving .pt file instead.")
776
+ os.makedirs(os.path.dirname(args.export_path), exist_ok=True)
777
+ mto.save(model, args.export_path)
778
return
779
780
with torch.inference_mode():
0 commit comments