
Commit 613e6a1

Apply style fixes
1 parent b08b26e commit 613e6a1

2 files changed

Lines changed: 18 additions & 20 deletions


src/diffusers/loaders/lora_conversion_utils.py

Lines changed: 10 additions & 11 deletions
@@ -2643,8 +2643,8 @@ def _convert_to_ai_toolkit_cat(sds_sd, ait_sd, sds_key, ait_keys, dims=None):
 def _nearest_kronecker_product(matrix, m1, n1, m2, n2):
     """Find the nearest rank-1 Kronecker product approximation (Van Loan & Pitsianis).

-    Given matrix M of shape (m1*m2, n1*n2), finds w1 (m1, n1) and w2 (m2, n2)
-    minimizing ||M - kron(w1, w2)||_F via rank-1 SVD of a rearranged matrix.
+    Given matrix M of shape (m1*m2, n1*n2), finds w1 (m1, n1) and w2 (m2, n2) minimizing ||M - kron(w1, w2)||_F via
+    rank-1 SVD of a rearranged matrix.
     """
     # Rearrange M into R of shape (m1*n1, m2*n2)
     # R[i*n1+j, k*n2+l] = M[i*m2+k, j*n2+l]
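
For context on the reflowed docstring, the rearrangement-plus-SVD procedure it describes can be sketched roughly as follows. This is an illustrative sketch of the Van Loan & Pitsianis idea, not the code in this commit; the function name and the use of torch.linalg.svd are assumptions.

import torch

def nearest_kronecker_product_sketch(matrix, m1, n1, m2, n2):
    # Rearrange M (m1*m2, n1*n2) into R (m1*n1, m2*n2) so that
    # R[i*n1+j, k*n2+l] = M[i*m2+k, j*n2+l], i.e. one row per (i, j) block.
    R = matrix.reshape(m1, m2, n1, n2).permute(0, 2, 1, 3).reshape(m1 * n1, m2 * n2)
    # The best rank-1 approximation of R yields the nearest Kronecker factors.
    U, S, Vh = torch.linalg.svd(R, full_matrices=False)
    scale = S[0].sqrt()
    w1 = (scale * U[:, 0]).reshape(m1, n1)
    w2 = (scale * Vh[0, :]).reshape(m2, n2)
    return w1, w2

# Sanity check: an exact Kronecker product should be recovered up to sign.
w1 = torch.randn(4, 4)
w2 = torch.randn(8, 16)
a, b = nearest_kronecker_product_sketch(torch.kron(w1, w2), 4, 4, 8, 16)
assert torch.allclose(torch.kron(a, b), torch.kron(w1, w2), atol=1e-4)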
@@ -2661,8 +2661,8 @@ def _nearest_kronecker_product(matrix, m1, n1, m2, n2):
 def _split_lokr_qkv(w1, w2, target_keys, factor):
     """Split fused LoKR QKV factors into separate per-projection Kronecker factors.

-    Materializes kron(w1, w2), chunks along dim=0, and re-factorizes each chunk
-    as a rank-1 Kronecker product using the Van Loan algorithm.
+    Materializes kron(w1, w2), chunks along dim=0, and re-factorizes each chunk as a rank-1 Kronecker product using the
+    Van Loan algorithm.

     Args:
         w1: First Kronecker factor, shape (f, f) where f = decompose_factor.
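
The split-and-refactorize step this docstring describes can likewise be sketched as below, reusing the nearest_kronecker_product_sketch helper above. The function name is hypothetical, and the real _split_lokr_qkv also assigns the results to target_keys, which this sketch omits.

import torch

def split_lokr_qkv_sketch(w1, w2, num_splits=3):
    # Materialize the fused factor, split the output dimension into Q/K/V chunks,
    # then re-factorize each chunk so it can target a separate projection module.
    fused = torch.kron(w1, w2)
    f = w1.shape[0]  # decompose_factor; w1 is (f, f)
    factors = []
    for chunk in fused.chunk(num_splits, dim=0):
        m2, n2 = chunk.shape[0] // f, chunk.shape[1] // f
        factors.append(nearest_kronecker_product_sketch(chunk, f, f, m2, n2))
    return factors  # list of (w1_i, w2_i) pairs, one per projection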
@@ -2694,9 +2694,8 @@ def _convert_non_diffusers_flux2_lokr_to_diffusers(state_dict, fuse_qkv=False):
     Args:
         state_dict: BFL-format LoKR state dict with ``diffusion_model.`` prefix.
         fuse_qkv: If True, map fused QKV directly to ``to_qkv``/``to_added_qkv`` targets
-            (lossless, but requires the model's QKV to be fused before injection).
-            If False (default), split fused QKV into separate Q/K/V via Kronecker
-            re-factorization (slightly lossy, no model fusion needed).
+            (lossless, but requires the model's QKV to be fused before injection). If False (default), split fused QKV
+            into separate Q/K/V via Kronecker re-factorization (slightly lossy, no model fusion needed).
     """
     converted_state_dict = {}

@@ -2891,8 +2890,8 @@ def _bake_lokr_alpha(state_dict):
 def _convert_lycoris_flux2_lokr_to_diffusers(state_dict):
     """Convert LyCORIS underscore-format Flux2 LoKR state dict to peft-compatible diffusers format.

-    LyCORIS keys use underscore-encoded paths (e.g., lycoris_transformer_blocks_0_attn_to_q.lokr_w1).
-    Decodes these to dotted diffusers paths using a known sub-path lookup table.
+    LyCORIS keys use underscore-encoded paths (e.g., lycoris_transformer_blocks_0_attn_to_q.lokr_w1). Decodes these to
+    dotted diffusers paths using a known sub-path lookup table.
     """
     import re
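
A rough sketch of the underscore-to-dotted decoding this docstring describes: the helper name, the regex, and the three lookup entries below are illustrative assumptions, not the table used in the actual function.

import re

# Hypothetical excerpt of the sub-path lookup table; the real table covers every
# Flux2 sub-module, these three entries are for illustration only.
SUBPATH_LOOKUP = {
    "attn_to_q": "attn.to_q",
    "attn_to_k": "attn.to_k",
    "attn_to_v": "attn.to_v",
}

def decode_lycoris_key_sketch(key):
    # "lycoris_transformer_blocks_0_attn_to_q.lokr_w1"
    #   -> "transformer_blocks.0.attn.to_q.lokr_w1"
    module_part, tensor_name = key.split(".", 1)
    match = re.match(r"lycoris_(.+?)_(\d+)_(.+)", module_part)
    prefix, block_idx, subpath = match.groups()
    return f"{prefix}.{block_idx}.{SUBPATH_LOOKUP[subpath]}.{tensor_name}"

print(decode_lycoris_key_sketch("lycoris_transformer_blocks_0_attn_to_q.lokr_w1"))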

@@ -2938,8 +2937,8 @@ def _convert_lycoris_flux2_lokr_to_diffusers(state_dict):
 def _convert_diffusers_flux2_lokr_to_peft(state_dict):
     """Convert diffusers-native Flux2 LoKR state dict by adding transformer. prefix and baking alpha.

-    Diffusers-native keys already use dotted module paths matching the model structure.
-    Only alpha baking and the transformer. prefix are needed.
+    Diffusers-native keys already use dotted module paths matching the model structure. Only alpha baking and the
+    transformer. prefix are needed.
     """
     original_state_dict = dict(state_dict)
     _bake_lokr_alpha(original_state_dict)

src/diffusers/utils/peft_utils.py

Lines changed: 8 additions & 9 deletions
@@ -347,13 +347,12 @@ def check_peft_version(min_version: str) -> None:
 def _create_lokr_config(state_dict):
     """Create a peft LoKrConfig from a converted LoKR state dict.

-    Infers rank, decompose_both, decompose_factor, and target_modules from the state dict key names
-    and tensor shapes. Alpha scaling is assumed to be already baked into the weights, so config
-    alpha = r (scaling = 1.0).
+    Infers rank, decompose_both, decompose_factor, and target_modules from the state dict key names and tensor shapes.
+    Alpha scaling is assumed to be already baked into the weights, so config alpha = r (scaling = 1.0).

-    Peft determines w2 decomposition via ``r < max(out_k, in_n) / 2``. We must set per-module rank
-    values that reproduce the same decomposition pattern as the checkpoint. For modules with full
-    (non-decomposed) lokr_w2, we set rank = max(lokr_w2.shape) so that peft also creates a full w2.
+    Peft determines w2 decomposition via ``r < max(out_k, in_n) / 2``. We must set per-module rank values that
+    reproduce the same decomposition pattern as the checkpoint. For modules with full (non-decomposed) lokr_w2, we set
+    rank = max(lokr_w2.shape) so that peft also creates a full w2.
     """
     from peft import LoKrConfig
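
As a rough illustration of the rank rule in this docstring, a hypothetical helper might infer a per-module rank pattern as below. It assumes decomposed w2_a tensors are stored as (rows, rank), which is not confirmed by this hunk.

import torch

def infer_lokr_rank_pattern(state_dict):
    # Hypothetical helper illustrating the rule above: pick a per-module rank that
    # makes peft's "r < max(out_k, in_n) / 2" test reproduce the checkpoint's w2 layout.
    rank_pattern = {}
    for key, tensor in state_dict.items():
        if key.endswith(".lokr_w2"):
            # Full (non-decomposed) w2: rank = max(shape) makes the test fail,
            # so peft also keeps w2 dense.
            rank_pattern[key[: -len(".lokr_w2")]] = max(tensor.shape)
        elif key.endswith(".lokr_w2_a"):
            # Decomposed w2: the rank is the inner dimension shared by w2_a/w2_b
            # (assuming w2_a is stored as (rows, rank)).
            rank_pattern[key[: -len(".lokr_w2_a")]] = tensor.shape[-1]
    return rank_pattern

print(infer_lokr_rank_pattern({
    "blocks.0.attn.to_q.lokr_w2": torch.empty(768, 1024),   # full w2 -> rank 1024
    "blocks.1.attn.to_q.lokr_w2_a": torch.empty(768, 16),   # decomposed -> rank 16
}))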

@@ -431,9 +430,9 @@ def _create_lokr_config(state_dict):
 def _convert_adapter_to_lora(model, rank, adapter_name="default"):
     """Convert a loaded non-LoRA peft adapter (e.g., LoKR) to LoRA via truncated SVD.

-    Wraps ``peft.convert_to_lora`` which materializes each adapter layer's delta weight
-    and decomposes it as ``U @ diag(S) @ V ≈ lora_B @ lora_A``. The conversion is lossy:
-    higher ``rank`` preserves more fidelity at the cost of larger LoRA matrices.
+    Wraps ``peft.convert_to_lora`` which materializes each adapter layer's delta weight and decomposes it as ``U @
+    diag(S) @ V ≈ lora_B @ lora_A``. The conversion is lossy: higher ``rank`` preserves more fidelity at the cost of
+    larger LoRA matrices.

     Args:
         model: ``nn.Module`` with a peft adapter already injected.
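
The truncated-SVD factorization this docstring refers to can be sketched as follows. This illustrates the math only, with hypothetical names; it is not the ``peft.convert_to_lora`` call the function wraps.

import torch

def truncated_svd_to_lora_sketch(delta_weight, rank):
    # Approximate delta_W (out, in) as lora_B (out, r) @ lora_A (r, in) using the
    # top-r singular triplets, which is the lossy step described above.
    U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=False)
    U, S, Vh = U[:, :rank], S[:rank], Vh[:rank, :]
    lora_B = U * S.sqrt()            # scale columns of U by sqrt(S)
    lora_A = S.sqrt()[:, None] * Vh  # scale rows of Vh by sqrt(S)
    return lora_A, lora_B

# Higher rank keeps more of the spectrum, so the approximation error shrinks.
delta = torch.randn(512, 256)
lora_A, lora_B = truncated_svd_to_lora_sketch(delta, rank=64)
print(torch.linalg.matrix_norm(delta - lora_B @ lora_A))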
