Skip to content

Commit a91d572

Browse files
committed
NXP backend: Fix incorrect quantization range
1 parent 37c79bd commit a91d572

1 file changed

Lines changed: 4 additions & 4 deletions

File tree

backends/transforms/quantize_fused_convbn_bias_pass.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,7 @@ def _quantize_fused_conv_bias(
246246
bias_scale,
247247
bias_zp,
248248
0,
249-
-(2**31),
249+
-(2**31) + 1,
250250
2**31 - 1,
251251
torch.int32,
252252
)
@@ -267,7 +267,7 @@ def _quantize_fused_conv_bias(
267267
scale_node,
268268
zp_node,
269269
0,
270-
-(2**31),
270+
-(2**31) + 1,
271271
2**31 - 1,
272272
torch.int32,
273273
),
@@ -279,14 +279,14 @@ def _quantize_fused_conv_bias(
279279
bias_scale = input_dequant.args[1] * weight_scale
280280

281281
qbias = torch.ops.quantized_decomposed.quantize_per_tensor.default(
282-
bias, bias_scale, 0, -(2**31), 2**31 - 1, torch.int32
282+
bias, bias_scale, 0, -(2**31) + 1, 2**31 - 1, torch.int32
283283
)
284284
set_param(bias_node, qbias)
285285

286286
with graph_module.graph.inserting_before(node):
287287
bias_dequant = graph_module.graph.call_function(
288288
dq_per_tensor,
289-
(bias_node, bias_scale, 0, -(2**31), 2**31 - 1, torch.int32),
289+
(bias_node, bias_scale, 0, -(2**31) + 1, 2**31 - 1, torch.int32),
290290
)
291291
bias_dequant.meta["val"] = dequant_val
292292
node.replace_input_with(bias_node, bias_dequant)

0 commit comments

Comments (0)