Skip to content

Commit ba49490

Browse files
committed
fix: quant config err on quantized offline model
Signed-off-by: h-guo18 <67671475+h-guo18@users.noreply.github.com>
1 parent 52e662d commit ba49490

File tree

1 file changed

+4
-7
lines changed

1 file changed

+4
-7
lines changed

modelopt/torch/speculative/plugins/transformers.py

Lines changed: 4 additions & 7 deletions
Original file line number · Diff line number · Diff line change
@@ -48,7 +48,7 @@
4848
)
4949
from transformers.trainer_pt_utils import LabelSmoother
5050
from transformers.utils import ModelOutput
51-
from transformers.utils.quantization_config import QuantizationMethod
51+
from transformers.utils.quantization_config import CompressedTensorsConfig
5252

5353
from ..eagle.conversion import EagleDMRegistry
5454
from ..eagle.eagle_model import EagleModel
@@ -585,12 +585,9 @@ def modify(
585585
self.eagle_config._attn_implementation = "sdpa"
586586

587587
# Patch for Kimi-K2-Thinking, avoid quantizing drafter
588-
if (
589-
hasattr(self.config, "quantization_config")
590-
and self.config.quantization_config.quant_method
591-
== QuantizationMethod.COMPRESSED_TENSORS
592-
):
593-
self.config.quantization_config.quantization_config.ignore.append("re:.*eagle_module.*")
588+
quant_config = getattr(self.config, "quantization_config", None)
589+
if isinstance(quant_config, CompressedTensorsConfig):
590+
quant_config.ignore.append("re:.*eagle_module.*")
594591

595592
# Set default aux_hidden_state layers
596593
if (

0 commit comments

Comments
 (0)