Skip to content

Commit 3470e97

Browse files
committed
up[
1 parent a5eaf4a commit 3470e97

1 file changed

Lines changed: 10 additions & 53 deletions

File tree

src/diffusers/loaders/lora_pipeline.py

Lines changed: 10 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -3628,6 +3628,7 @@ class KandinskyLoraLoaderMixin(LoraBaseMixin):
36283628

36293629
@classmethod
36303630
@validate_hf_hub_args
3631+
# Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.lora_state_dict
36313632
def lora_state_dict(
36323633
cls,
36333634
pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
@@ -3679,6 +3680,7 @@ def lora_state_dict(
36793680
out = (state_dict, metadata) if return_lora_metadata else state_dict
36803681
return out
36813682

3683+
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.load_lora_weights
36823684
def load_lora_weights(
36833685
self,
36843686
pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
@@ -3722,6 +3724,7 @@ def load_lora_weights(
37223724
)
37233725

37243726
@classmethod
3727+
# Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer
37253728
def load_lora_into_transformer(
37263729
cls,
37273730
state_dict,
@@ -3733,21 +3736,7 @@ def load_lora_into_transformer(
37333736
metadata=None,
37343737
):
37353738
"""
3736-
Load the LoRA layers specified in `state_dict` into `transformer`.
3737-
3738-
Parameters:
3739-
state_dict (`dict`):
3740-
A standard state dict containing the lora layer parameters.
3741-
transformer (`Kandinsky5Transformer3DModel`):
3742-
The transformer model to load the LoRA layers into.
3743-
adapter_name (`str`, *optional*):
3744-
Adapter name to be used for referencing the loaded adapter model.
3745-
low_cpu_mem_usage (`bool`, *optional*):
3746-
Speed up model loading by only loading the pretrained LoRA weights.
3747-
hotswap (`bool`, *optional*):
3748-
See [`~loaders.KandinskyLoraLoaderMixin.load_lora_weights`].
3749-
metadata (`dict`):
3750-
Optional LoRA adapter metadata.
3739+
See [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_into_unet`] for more details.
37513740
"""
37523741
if low_cpu_mem_usage and not is_peft_version(">=", "0.13.1"):
37533742
raise ValueError(
@@ -3767,6 +3756,7 @@ def load_lora_into_transformer(
37673756
)
37683757

37693758
@classmethod
3759+
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.save_lora_weights
37703760
def save_lora_weights(
37713761
cls,
37723762
save_directory: Union[str, os.PathLike],
@@ -3778,21 +3768,7 @@ def save_lora_weights(
37783768
transformer_lora_adapter_metadata=None,
37793769
):
37803770
r"""
3781-
Save the LoRA parameters corresponding to the transformer and text encoders.
3782-
3783-
Arguments:
3784-
save_directory (`str` or `os.PathLike`):
3785-
Directory to save LoRA parameters to.
3786-
transformer_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
3787-
State dict of the LoRA layers corresponding to the `transformer`.
3788-
is_main_process (`bool`, *optional*, defaults to `True`):
3789-
Whether the process calling this is the main process.
3790-
save_function (`Callable`):
3791-
The function to use to save the state dictionary.
3792-
safe_serialization (`bool`, *optional*, defaults to `True`):
3793-
Whether to save the model using `safetensors` or the traditional PyTorch way.
3794-
transformer_lora_adapter_metadata:
3795-
LoRA adapter metadata associated with the transformer.
3771+
See [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for more information.
37963772
"""
37973773
lora_layers = {}
37983774
lora_metadata = {}
@@ -3814,6 +3790,7 @@ def save_lora_weights(
38143790
safe_serialization=safe_serialization,
38153791
)
38163792

3793+
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
38173794
def fuse_lora(
38183795
self,
38193796
components: List[str] = ["transformer"],
@@ -3823,25 +3800,7 @@ def fuse_lora(
38233800
**kwargs,
38243801
):
38253802
r"""
3826-
Fuses the LoRA parameters into the original parameters of the corresponding blocks.
3827-
3828-
Args:
3829-
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
3830-
lora_scale (`float`, defaults to 1.0):
3831-
Controls how much to influence the outputs with the LoRA parameters.
3832-
safe_fusing (`bool`, defaults to `False`):
3833-
Whether to check fused weights for NaN values before fusing.
3834-
adapter_names (`List[str]`, *optional*):
3835-
Adapter names to be used for fusing.
3836-
3837-
Example:
3838-
```py
3839-
from diffusers import Kandinsky5T2VPipeline
3840-
3841-
pipeline = Kandinsky5T2VPipeline.from_pretrained("ai-forever/Kandinsky-5.0-T2V")
3842-
pipeline.load_lora_weights("path/to/lora.safetensors")
3843-
pipeline.fuse_lora(lora_scale=0.7)
3844-
```
3803+
See [`~loaders.StableDiffusionLoraLoaderMixin.fuse_lora`] for more details.
38453804
"""
38463805
super().fuse_lora(
38473806
components=components,
@@ -3851,12 +3810,10 @@ def fuse_lora(
38513810
**kwargs,
38523811
)
38533812

3813+
# Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.unfuse_lora
38543814
def unfuse_lora(self, components: List[str] = ["transformer"], **kwargs):
38553815
r"""
3856-
Reverses the effect of [`pipe.fuse_lora()`].
3857-
3858-
Args:
3859-
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
3816+
See [`~loaders.StableDiffusionLoraLoaderMixin.unfuse_lora`] for more details.
38603817
"""
38613818
super().unfuse_lora(components=components, **kwargs)
38623819

0 commit comments

Comments (0)