Skip to content

Commit c9fb020

Browse files
committed
lint
Signed-off-by: Jennifer Chen <jennifchen@nvidia.com>
1 parent 9fbe89c commit c9fb020

File tree

1 file changed

+6
-6
lines changed

1 file changed

+6
-6
lines changed

modelopt/torch/quantization/config.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -163,12 +163,12 @@
163163
}
164164

165165
_mamba_moe_disabled_quantizer_cfg = {
166-
"*fc1_latent_proj*": {"enable": False}, # Skip Latent MOE
167-
"*fc2_latent_proj*": {"enable": False}, # Skip Latent MOE
168-
"*q_proj*": {"enable": False}, # Skip QKV Linear
169-
"*k_proj*": {"enable": False}, # Skip QKV Linear
170-
"*v_proj*": {"enable": False}, # Skip QKV Linear
171-
"*o_proj*": {"enable": False}, # Skip QKV Output Projection
166+
"*fc1_latent_proj*": {"enable": False}, # Skip Latent MOE
167+
"*fc2_latent_proj*": {"enable": False}, # Skip Latent MOE
168+
"*q_proj*": {"enable": False}, # Skip QKV Linear
169+
"*k_proj*": {"enable": False}, # Skip QKV Linear
170+
"*v_proj*": {"enable": False}, # Skip QKV Linear
171+
"*o_proj*": {"enable": False}, # Skip QKV Output Projection
172172
}
173173

174174
INT8_DEFAULT_CFG = {

0 commit comments

Comments (0)