Skip to content

Commit ba020d5

Browse files
ajrasane and claude
committed
Fix ruff formatting issues
Signed-off-by: ajrasane <arasane@nvidia.com> Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> Signed-off-by: ajrasane <131806219+ajrasane@users.noreply.github.com>
1 parent 54cefb2 commit ba020d5

File tree

2 files changed

+6
-4
lines changed

2 files changed

+6
-4
lines changed

examples/torch_onnx/torch_quant_to_onnx.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -166,7 +166,11 @@ def _calibrate_uncalibrated_quantizers(model, data_loader):
166166
if not hasattr(module, attr_name):
167167
continue
168168
quantizer = getattr(module, attr_name)
169-
if quantizer.is_enabled and not quantizer.block_sizes and not hasattr(quantizer, "_amax"):
169+
if (
170+
quantizer.is_enabled
171+
and not quantizer.block_sizes
172+
and not hasattr(quantizer, "_amax")
173+
):
170174
quantizer.enable_calib()
171175
uncalibrated.append(quantizer)
172176

modelopt/torch/_deploy/utils/torch_onnx.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -587,9 +587,7 @@ def get_onnx_bytes_and_metadata(
587587
# Disable FP8 Conv weight quantizers: TorchScript ONNX exporter requires static
588588
# kernel shapes, but FP8 DequantizeLinear produces dynamic shapes.
589589
conv_wq_context = (
590-
_disable_fp8_conv_weight_quantizers(model)
591-
if is_fp8_quantized(model)
592-
else nullcontext()
590+
_disable_fp8_conv_weight_quantizers(model) if is_fp8_quantized(model) else nullcontext()
593591
)
594592
with torch.inference_mode(), autocast, quantizer_context, conv_wq_context:
595593
additional_kwargs = {}

0 commit comments

Comments
 (0)