-
Notifications
You must be signed in to change notification settings - Fork 231
transformers v5.5 #1684
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
transformers v5.5 #1684
Changes from all commits
e6df5a0
4fcd786
9b439b4
88d4f1a
c31a88b
da7c410
5ca59b8
661f0a5
b52550f
c5f9c47
ee941ff
4a77bce
06187d0
9857954
95a6efd
dbc13a9
fd94a59
43bd816
31ffbb4
8ae40d3
6c05f54
c09aab1
620f7a5
24982a5
a80912e
1652bc8
2699f22
d1a61da
dbdf3af
fd8d155
cef3755
888bfb9
60582f8
ad5aea6
f9788e3
ea462e4
9acdc49
588f89e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Large diffs are not rendered by default.
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
|
|
@@ -28,8 +28,8 @@ | |||||
|
|
||||||
| INSTALL_REQUIRE = [ | ||||||
| "torch>=2.1", | ||||||
| "optimum-onnx@git+https://github.com/huggingface/optimum-onnx.git@transformers-v5", | ||||||
| "transformers>=4.45,<5.1", | ||||||
| "optimum-onnx@git+https://github.com/huggingface/optimum-onnx.git@transformers-v5.5", | ||||||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Suggested change
Should be updated before merging (once #1690 is merged, and huggingface/optimum#2430 for v5.4). |
||||||
| "transformers>=4.57,<5.5", | ||||||
| "setuptools", | ||||||
| "huggingface-hub>=0.23.2,<2.0", | ||||||
| "nncf>=2.19.0", | ||||||
|
|
||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -97,7 +97,7 @@ | |
| TemporaryDirectory, | ||
| ) | ||
| from optimum.intel.pipelines import pipeline as optimum_pipeline | ||
| from optimum.intel.utils.import_utils import _langchain_hf_available, is_transformers_version | ||
| from optimum.intel.utils.import_utils import _langchain_hf_available, is_datasets_version, is_transformers_version | ||
| from optimum.intel.utils.modeling_utils import _find_files_matching_pattern | ||
| from optimum.utils import ( | ||
| DIFFUSION_MODEL_TEXT_ENCODER_2_SUBFOLDER, | ||
|
|
@@ -931,8 +931,8 @@ def test_pipeline(self, model_arch): | |
| @pytest.mark.run_slow | ||
| @slow | ||
| @pytest.mark.skipif( | ||
| is_transformers_version(">=", "5.3"), | ||
| reason="requires transformers < v5.3 since question-answering pipeline is deprecated in v5.3", | ||
| is_transformers_version(">=", "5.3") or is_datasets_version("<", "4"), | ||
| reason="requires datasets >= 4 or transformers < v5.3 since question-answering pipeline is deprecated in v5.3", | ||
| ) | ||
| def test_metric(self): | ||
| model_id = "distilbert-base-cased-distilled-squad" | ||
|
|
@@ -1113,7 +1113,6 @@ class OVModelForMaskedLMIntegrationTest(unittest.TestCase): | |
| "albert", | ||
| "bert", | ||
| "camembert", | ||
| "convbert", | ||
| "deberta", | ||
| "deberta-v2", | ||
| "distilbert", | ||
|
|
@@ -1131,13 +1130,16 @@ class OVModelForMaskedLMIntegrationTest(unittest.TestCase): | |
| ) | ||
|
|
||
| # accuracy issue, need additional investigation | ||
| if is_transformers_version("<", "4.51.0"): | ||
| if is_transformers_version("<", "4.51"): | ||
| SUPPORTED_ARCHITECTURES += ("nystromformer",) | ||
|
|
||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version("<", "5"): | ||
| SUPPORTED_ARCHITECTURES += ("data2vec-text", "flaubert", "xlm") | ||
|
|
||
| if is_transformers_version("!=", "5.2"): | ||
| SUPPORTED_ARCHITECTURES += ("convbert",) | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Failing in v5.2 (https://github.com/huggingface/transformers/blob/v5.2.0/src/transformers/modeling_utils.py#L2315); fixed in v5.3 since huggingface/transformers@a64996e |
||
|
|
||
| @parameterized.expand(SUPPORTED_ARCHITECTURES) | ||
| def test_compare_to_transformers(self, model_arch): | ||
| model_id = MODEL_NAMES[model_arch] | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -143,8 +143,6 @@ class OVModelForSeq2SeqLMIntegrationTest(OVSeq2SeqTestMixin): | |
| "blenderbot", | ||
| "blenderbot-small", | ||
| "longt5", | ||
| "m2m_100", | ||
| "mbart", | ||
| "pegasus", | ||
| "t5", | ||
| ) | ||
|
|
@@ -153,20 +151,18 @@ class OVModelForSeq2SeqLMIntegrationTest(OVSeq2SeqTestMixin): | |
| TASK = "text2text-generation" | ||
| GENERATION_LENGTH = 100 | ||
| SPEEDUP_CACHE = 1.1 | ||
| UNSUPPORTED_ARCHITECTURES = set() | ||
| if not (is_openvino_version(">=", "2025.3.0") and is_openvino_version("<", "2026.1")) and is_transformers_version( | ||
| "<", "5" | ||
| ): | ||
| # There are known issues with marian model on OpenVINO 2025.3.x and 2025.4.x | ||
| SUPPORTED_ARCHITECTURES += ("marian",) | ||
| else: | ||
| UNSUPPORTED_ARCHITECTURES.add("marian") | ||
|
|
||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version("<", "5"): | ||
| SUPPORTED_ARCHITECTURES += ("mt5",) | ||
| else: | ||
| UNSUPPORTED_ARCHITECTURES.add("mt5") | ||
| _is_model_supported = { | ||
| # config loading failing coming from type mismatch coming from transformers v5.4 | ||
| "m2m_100": is_transformers_version("!=", "5.4"), | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Issue coming from a mismatch in https://github.com/huggingface/transformers/blob/v5.4.0/src/transformers/models/m2m_100/configuration_m2m_100.py#L68 (introduced in v5.4, fixed in v5.5). |
||
| "mbart": is_transformers_version("!=", "5.4"), | ||
| # known issues with marian on OpenVINO 2025.3.x and 2025.4.x | ||
| "marian": not (is_openvino_version(">=", "2025.3.0") and is_openvino_version("<", "2026.1")) | ||
| and is_transformers_version("<", "5"), | ||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly (mt5) | ||
| "mt5": is_transformers_version("<", "5"), | ||
| } | ||
| SUPPORTED_ARCHITECTURES += tuple(arch for arch, supported in _is_model_supported.items() if supported) | ||
| UNSUPPORTED_ARCHITECTURES = {arch for arch, supported in _is_model_supported.items() if not supported} | ||
|
|
||
| SUPPORT_STATEFUL = ("t5", "mt5", "longt5") | ||
| if is_transformers_version(">=", "4.52.0"): | ||
|
|
@@ -445,10 +441,17 @@ def test_pipeline(self, model_arch): | |
|
|
||
|
|
||
| class OVModelForVision2SeqIntegrationTest(OVSeq2SeqTestMixin): | ||
| SUPPORTED_ARCHITECTURES = ["vision-encoder-decoder", "trocr", "donut"] | ||
| SUPPORTED_ARCHITECTURES = ["vision-encoder-decoder", "trocr"] | ||
| # GOT-OCR2 models shouldn't be exported using the task image-to-text (currently equivalent to exporting the model using image-text-to-text) and will be deprecated v1.29 | ||
| # TODO: move pix2struct tests from OVModelForPix2StructIntegrationTest | ||
| UNSUPPORTED_ARCHITECTURES = {"got_ocr2", "pix2struct"} | ||
|
|
||
| # config loading failing coming from type mismatch coming from transformers v5.4 | ||
| _is_model_supported = {"donut": is_transformers_version("!=", "5.4")} | ||
| SUPPORTED_ARCHITECTURES += [arch for arch, supported in _is_model_supported.items() if supported] | ||
| UNSUPPORTED_ARCHITECTURES = {"got_ocr2", "pix2struct"} | { | ||
| arch for arch, supported in _is_model_supported.items() if not supported | ||
| } | ||
|
|
||
| TASK = "image-to-text" | ||
| OVMODEL_CLASS = OVModelForVision2Seq | ||
| AUTOMODEL_CLASS = transformers_auto_class | ||
|
|
@@ -568,33 +571,17 @@ class OVModelForVisualCausalLMIntegrationTest(OVSeq2SeqTestMixin): | |
| if is_transformers_version(">=", "4.46.0"): | ||
| SUPPORTED_ARCHITECTURES += ["maira2"] | ||
|
|
||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version("<", "5"): | ||
| SUPPORTED_ARCHITECTURES += ["idefics3"] | ||
|
|
||
| if is_transformers_version(">=", "4.49.0"): | ||
| SUPPORTED_ARCHITECTURES += ["qwen2_5_vl"] | ||
| SUPPORT_VIDEO.append("qwen2_5_vl") | ||
|
|
||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version("<", "5"): | ||
| SUPPORTED_ARCHITECTURES += ["got_ocr2"] | ||
|
|
||
| if is_transformers_version("<", "4.54.0"): | ||
| # remote code models differs after transformers v4.54 | ||
| SUPPORTED_ARCHITECTURES += ["phi4mm"] | ||
| SUPPORT_AUDIO.append("phi4mm") | ||
|
|
||
| if is_transformers_version(">=", "4.50"): | ||
| SUPPORTED_ARCHITECTURES += ["gemma3"] | ||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version("<", "5"): | ||
| SUPPORTED_ARCHITECTURES += ["smolvlm"] | ||
|
|
||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version(">=", "4.51") and is_transformers_version("<", "5"): | ||
| # SUPPORTED_ARCHITECTURES += ["llama4", "phi4_multimodal"] | ||
| SUPPORTED_ARCHITECTURES += ["llama4"] | ||
|
|
||
| if is_transformers_version("<", "4.52"): | ||
| SUPPORTED_ARCHITECTURES += ["minicpmo"] | ||
|
|
@@ -611,10 +598,15 @@ class OVModelForVisualCausalLMIntegrationTest(OVSeq2SeqTestMixin): | |
| SUPPORTED_ARCHITECTURES += ["internvl_chat", "minicpmv"] | ||
|
|
||
| # TODO: add fix for v5 and update MAX_TRANSFORMERS_VERSION accordingly | ||
| if is_transformers_version("<", "5"): | ||
| SUPPORTED_ARCHITECTURES += ("llava_next_video",) | ||
| else: | ||
| UNSUPPORTED_ARCHITECTURES.update({"got_ocr2", "idefics3", "llama4", "llava_next_video", "smolvlm"}) | ||
| _is_model_supported = { | ||
|
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. TODO: extend for all architectures (waiting for validation; min required transformers version set to 4.57 — can be done in a follow-up PR if needed). |
||
| "idefics3": is_transformers_version(">=", "4.46.0") and is_transformers_version("<", "5"), | ||
| "got_ocr2": is_transformers_version(">=", "4.49.0") and is_transformers_version("<", "5"), | ||
| "smolvlm": is_transformers_version(">=", "4.50") and is_transformers_version("<", "5"), | ||
| "llama4": is_transformers_version(">=", "4.51") and is_transformers_version("<", "5"), | ||
| "llava_next_video": is_transformers_version("<", "5"), | ||
| } | ||
| SUPPORTED_ARCHITECTURES += [arch for arch, supported in _is_model_supported.items() if supported] | ||
| UNSUPPORTED_ARCHITECTURES.update(arch for arch, supported in _is_model_supported.items() if not supported) | ||
| REMOTE_CODE_MODELS = ["internvl_chat", "minicpmv", "minicpmo", "llava-qwen2", "phi3_v", "maira2", "phi4mm"] | ||
| IMAGE = Image.open( | ||
| requests.get( | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Before v5.2, `attention_mask` was created in `generate` by calling `_prepare_attention_mask_for_generation` (https://github.com/huggingface/transformers/blob/v5.1.0/src/transformers/generation/utils.py#L2530); this is no longer the case for encoder-decoder models since v5.2 (https://github.com/huggingface/transformers/blob/v5.2.0/src/transformers/generation/utils.py#L2555).