Skip to content

Commit 8bbb06d

Browse files
Copilot and lstein committed
fix: use Qwen2_5_VLProcessor directly instead of AutoProcessor to avoid AutoConfig lookup
Co-authored-by: lstein <111189+lstein@users.noreply.github.com> Agent-Logs-Url: https://github.com/lstein/InvokeAI/sessions/1a071545-ea9e-42c2-8296-d0582d851478
1 parent a187fea commit 8bbb06d

1 file changed

Lines changed: 7 additions & 7 deletions

File tree

invokeai/app/invocations/qwen_image_edit_text_encoder.py

Lines changed: 7 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
import torch
22
from PIL import Image as PILImage
3-
from transformers import AutoProcessor, Qwen2_5_VLForConditionalGeneration
3+
from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2_5_VLProcessor
44

55
from invokeai.app.invocations.baseinvocation import BaseInvocation, Classification, invocation
66
from invokeai.app.invocations.fields import (
@@ -71,14 +71,14 @@ def _encode(self, context: InvocationContext, images: list[PILImage.Image]) -> t
7171

7272
messages = [{"role": "user", "content": content}]
7373

74-
# Load the full processor (image_processor + tokenizer) from the tokenizer submodel path.
75-
# Using AutoProcessor.from_pretrained ensures all components are loaded correctly
76-
# regardless of whether the model uses Qwen2VLProcessor or Qwen2_5_VLProcessor.
77-
# For diffusers models the processor config lives in the `tokenizer` subdirectory,
78-
# so we append the submodel directory name to the root model path.
74+
# Load the processor from the tokenizer subdirectory of the diffusers root.
75+
# Qwen2_5_VLProcessor.from_pretrained is used directly instead of AutoProcessor
76+
# because AutoProcessor calls AutoConfig.from_pretrained first to determine the
77+
# model type, but the `tokenizer` subdirectory has no config.json with a model_type
78+
# key — only tokenizer/preprocessor files live there.
7979
tokenizer_config = context.models.get_config(self.qwen_vl_encoder.tokenizer)
8080
tokenizer_abs_path = context.models.get_absolute_path(tokenizer_config) / "tokenizer"
81-
processor = AutoProcessor.from_pretrained(str(tokenizer_abs_path), local_files_only=True)
81+
processor = Qwen2_5_VLProcessor.from_pretrained(str(tokenizer_abs_path), local_files_only=True)
8282

8383
text_encoder_info = context.models.load(self.qwen_vl_encoder.text_encoder)
8484
with text_encoder_info.model_on_device() as (_, text_encoder):

0 commit comments

Comments (0)