We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent df090ec · commit 84b4ec8 (copy full SHA for 84b4ec8)
1 file changed
modelopt/onnx/utils.py
@@ -1666,9 +1666,7 @@ def fold_qdq_scale_fp16_to_fp32_casts(onnx_model: onnx.ModelProto) -> onnx.ModelProto:
1666         # other ops would silently receive FP16 instead of the FP32 they requested.
1667         cast_output = cast_node.output[0]
1668         consumers = consumer_map.get(cast_output, [])
1669 -       if not consumers or not all(
1670 -           c.op_type in qdq_ops and i == 1 for c, i in consumers
1671 -       ):
1669 +       if not consumers or not all(c.op_type in qdq_ops and i == 1 for c, i in consumers):
1670         continue
1671
1672         # Bypass the cast so the scale stays FP16
0 commit comments