Skip to content

Commit b3f57ed

Browse files
authored
Merge pull request #826 from mi804/qwen-image-edit-lowvram
fix qwen-image-edit-lowvram
2 parents 9d0683d + c9fea72 commit b3f57ed

File tree

1 file changed

+3
-1
lines changed

1 file changed

+3
-1
lines changed

diffsynth/pipelines/qwen_image.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -103,7 +103,7 @@ def enable_vram_management(self, num_persistent_param_in_dit=None, vram_limit=No
103103
vram_limit = vram_limit - vram_buffer
104104

105105
if self.text_encoder is not None:
106-
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLRotaryEmbedding, Qwen2RMSNorm
106+
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLRotaryEmbedding, Qwen2RMSNorm, Qwen2_5_VisionPatchEmbed, Qwen2_5_VisionRotaryEmbedding
107107
dtype = next(iter(self.text_encoder.parameters())).dtype
108108
enable_vram_management(
109109
self.text_encoder,
@@ -112,6 +112,8 @@ def enable_vram_management(self, num_persistent_param_in_dit=None, vram_limit=No
112112
torch.nn.Embedding: AutoWrappedModule,
113113
Qwen2_5_VLRotaryEmbedding: AutoWrappedModule,
114114
Qwen2RMSNorm: AutoWrappedModule,
115+
Qwen2_5_VisionPatchEmbed: AutoWrappedModule,
116+
Qwen2_5_VisionRotaryEmbedding: AutoWrappedModule,
115117
},
116118
module_config = dict(
117119
offload_dtype=dtype,

0 commit comments

Comments (0)