Skip to content

Commit 57eef28

Browse files
committed
Fix errors with old transformers version
- Change the supported transformers versions for deepseek and qwen.
- The ChatGLM issue is caused by NaN values in the tiny model's outputs and is tracked by an internal ticket. For now, remove chatglm from the GenAI tests. This only affects chatglm, not chatglm4.
1 parent 89695b7 commit 57eef28

1 file changed

Lines changed: 31 additions & 4 deletions

File tree

tests/openvino/test_genai.py

Lines changed: 31 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -152,6 +152,10 @@ class LLMPipelineTestCase(unittest.TestCase):
152152
ALL_SUPPORTED_ARCHITECTURES += ("glm", "mistral-nemo", "opt")
153153
if is_transformers_version("<", "5"):
154154
ALL_SUPPORTED_ARCHITECTURES += ("phimoe",)
155+
if is_transformers_version("<", "4.54.0"):
156+
ALL_SUPPORTED_ARCHITECTURES += ("deepseek",)
157+
if is_transformers_version(">=", "4.47.0") and is_transformers_version("<", "4.56.0"):
158+
ALL_SUPPORTED_ARCHITECTURES += ("qwen",)
155159
if is_transformers_version(">=", "4.48.0"):
156160
ALL_SUPPORTED_ARCHITECTURES += ("cohere2",)
157161
if is_transformers_version(">=", "4.50"):
@@ -164,14 +168,14 @@ class LLMPipelineTestCase(unittest.TestCase):
164168
ALL_SUPPORTED_ARCHITECTURES += ("arcee",)
165169
if is_transformers_version(">=", "4.54.0") and is_transformers_version("<", "5"):
166170
ALL_SUPPORTED_ARCHITECTURES += ("exaone4",)
167-
if is_transformers_version(">=", "4.55.0"):
171+
if is_transformers_version(">=", "4.55.1"):
168172
ALL_SUPPORTED_ARCHITECTURES += ("gpt_oss",)
169173

170174
# max versions
171175
if is_transformers_version("<", "4.54.0"):
172-
ALL_SUPPORTED_ARCHITECTURES += ("minicpm", "minicpm3", "arctic", "deepseek")
176+
ALL_SUPPORTED_ARCHITECTURES += ("minicpm", "minicpm3", "arctic")
173177
if is_transformers_version("<", "4.56.0"):
174-
ALL_SUPPORTED_ARCHITECTURES += ("chatglm", "chatglm4", "qwen")
178+
ALL_SUPPORTED_ARCHITECTURES += ("chatglm4",)
175179

176180
if is_transformers_version("<", "5"):
177181
ALL_SUPPORTED_ARCHITECTURES += (
@@ -195,7 +199,6 @@ class LLMPipelineTestCase(unittest.TestCase):
195199
SUPPORTED_ARCHITECTURES = NPU_SUPPORTED_ARCHITECTURES if OPENVINO_DEVICE == "NPU" else ALL_SUPPORTED_ARCHITECTURES
196200

197201
REMOTE_CODE_MODELS = (
198-
"chatglm",
199202
"minicpm",
200203
"jais",
201204
"qwen",
@@ -283,6 +286,13 @@ def test_compare_outputs(self, model_arch):
283286
genai_ids = genai_model(
284287
ov.Tensor(inputs["input_ids"].numpy()), apply_chat_template=False, **self.GEN_KWARGS
285288
).tokens[0]
289+
290+
del genai_model
291+
del transformers_model
292+
if OPENVINO_DEVICE != "NPU":
293+
del optimum_model
294+
gc.collect()
295+
286296
self.assertEqual(
287297
transformers_ids.tolist(), genai_ids, "Transformers ids and OpenVINO GenAI ids are not the same"
288298
)
@@ -413,6 +423,12 @@ def test_compare_outputs(self, model_arch):
413423
prompt, images=[ov.Tensor(np.array(image))], ignore_eos=True, apply_chat_template=True, **self.GEN_KWARGS
414424
).texts[0]
415425

426+
del genai_model
427+
del transformers_model
428+
if OPENVINO_DEVICE != "NPU":
429+
del optimum_model
430+
gc.collect()
431+
416432
# assert they are not empty
417433
self.assertTrue(transformers_output)
418434
self.assertTrue(genai_output)
@@ -477,6 +493,12 @@ def test_compare_outputs(self, model_arch):
477493

478494
genai_output = genai_model.generate(inputs["input_features"].flatten().tolist(), **self.GEN_KWARGS).texts[0]
479495

496+
del genai_model
497+
del transformers_model
498+
if OPENVINO_DEVICE != "NPU":
499+
del optimum_model
500+
gc.collect()
501+
480502
self.assertEqual(transformers_output, genai_output)
481503

482504

@@ -545,6 +567,11 @@ def test_compare_outputs(self, model_arch):
545567
genai_output = genai_model.generate(text, **self.GEN_KWARGS).speeches[0]
546568
genai_output = torch.from_numpy(genai_output.data).squeeze(0) # collapse batch dimension (if any)
547569

570+
del genai_model
571+
del optimum_model
572+
del transformers_model
573+
gc.collect()
574+
548575
torch.testing.assert_close(transformers_output, optimum_output, rtol=1e-2, atol=1e-3)
549576
torch.testing.assert_close(transformers_output, genai_output, rtol=1e-2, atol=1e-3)
550577

0 commit comments

Comments (0)