We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 6fe5740 · commit aa60527 — Copy full SHA for aa60527
modelopt/torch/export/unified_export_hf.py
@@ -993,6 +993,11 @@ def _export_diffusers_checkpoint(
993
print(f"Export complete. Saved to: {export_dir}")
994
995
996
# TODO: Remove this workaround once HuggingFace fixes revert_weight_conversion to handle
# scalar (0-d) tensors. The bug is in transformers' Chunk.convert(), which calls
# tensor.size(self.dim) on quantization scale buffers that are 0-d scalars, causing an
# IndexError. Confirmed still present in transformers 5.2.0.
# See: transformers/core_model_loading.py, Chunk.convert()
1001
def _revert_weight_conversion_noop(model: Any, state_dict: dict) -> dict:
    """No-op replacement for transformers' revert_weight_conversion.

    Returns ``state_dict`` unchanged. ``model`` is accepted but unused — it is
    presumably kept only so this function's signature matches the transformers
    function it stands in for (verify against the patch site). Used to bypass
    transformers' weight-conversion reversal, which raises IndexError on 0-d
    quantization scale tensors (see the TODO at the patch site).

    Args:
        model: Ignored; present for signature compatibility.
        state_dict: The model state dict to return as-is.

    Returns:
        The same ``state_dict`` object, unmodified.
    """
    return state_dict
0 commit comments