+# coding=utf-8
+# Copyright 2025 HuggingFace Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
import torch

from diffusers import LongCatAudioDiTTransformer
+from diffusers.utils.torch_utils import randn_tensor
+
+from ...testing_utils import enable_full_determinism, torch_device
+from ..testing_utils import (
+    AttentionTesterMixin,
+    BaseModelTesterConfig,
+    MemoryTesterMixin,
+    ModelTesterMixin,
+    TorchCompileTesterMixin,
+)
+
+
+enable_full_determinism()
+
+
+class LongCatAudioDiTTransformerTesterConfig(BaseModelTesterConfig):
+    @property
+    def main_input_name(self) -> str:
+        return "hidden_states"
+
+    @property
+    def model_class(self):
+        return LongCatAudioDiTTransformer
+
+    @property
+    def output_shape(self) -> tuple[int, ...]:
+        return (16, 8)
+
+    @property
+    def generator(self):
+        return torch.Generator("cpu").manual_seed(0)
+
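+    # Minimal configuration so the test model stays tiny and fast.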
+    def get_init_dict(self) -> dict[str, int | bool | float | str]:
+        return {
+            "dit_dim": 64,
+            "dit_depth": 2,
+            "dit_heads": 4,
+            "dit_text_dim": 32,
+            "latent_dim": 8,
+            "text_conv": False,
+        }
+
+    def get_dummy_inputs(self) -> dict[str, torch.Tensor]:
+        batch_size = 1
+        sequence_length = 16
+        encoder_sequence_length = 10
+        latent_dim = 8
+        text_dim = 32
+
+        return {
+            "hidden_states": randn_tensor(
+                (batch_size, sequence_length, latent_dim), generator=self.generator, device=torch_device
+            ),
+            "encoder_hidden_states": randn_tensor(
+                (batch_size, encoder_sequence_length, text_dim), generator=self.generator, device=torch_device
+            ),
+            "encoder_attention_mask": torch.ones(
+                batch_size, encoder_sequence_length, dtype=torch.bool, device=torch_device
+            ),
+            "attention_mask": torch.ones(batch_size, sequence_length, dtype=torch.bool, device=torch_device),
+            "timestep": torch.ones(batch_size, device=torch_device),
+        }
+
+
+class TestLongCatAudioDiTTransformer(LongCatAudioDiTTransformerTesterConfig, ModelTesterMixin):
+    pass
+
+
+class TestLongCatAudioDiTTransformerMemory(LongCatAudioDiTTransformerTesterConfig, MemoryTesterMixin):
+    def test_layerwise_casting_memory(self):
+        pytest.skip("LongCatAudioDiTTransformer does not support standard layerwise casting memory tests yet.")
+
+    def test_layerwise_casting_training(self):
+        pytest.skip("LongCatAudioDiTTransformer does not support standard layerwise casting training tests yet.")
+
+    def test_group_offloading_with_layerwise_casting(self, *args, **kwargs):
+        pytest.skip(
+            "LongCatAudioDiTTransformer does not support combined group offloading and layerwise casting tests yet."
+        )
+
+
+class TestLongCatAudioDiTTransformerCompile(LongCatAudioDiTTransformerTesterConfig, TorchCompileTesterMixin):
+    def test_torch_compile_repeated_blocks(self):
+        pytest.skip("LongCatAudioDiTTransformer does not define repeated blocks for regional compilation.")
+
+
+class TestLongCatAudioDiTTransformerAttention(LongCatAudioDiTTransformerTesterConfig, AttentionTesterMixin):
+    pass
+
+
+def test_longcat_audio_attention_uses_standard_self_attn_kwargs():
+    from diffusers.models.transformers.transformer_longcat_audio_dit import AudioDiTAttention
+
+    attn = AudioDiTAttention(q_dim=4, kv_dim=None, heads=1, dim_head=4, dropout=0.0, bias=False)
+
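+    # Identity projections (and no bias) make the attention output a pure mask-weighted mix of the input rows.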
+    eye = torch.eye(4)
+    with torch.no_grad():
+        attn.to_q.weight.copy_(eye)
+        attn.to_k.weight.copy_(eye)
+        attn.to_v.weight.copy_(eye)
+        attn.to_out[0].weight.copy_(eye)
+
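+    # Batch of one with two tokens; the attention mask marks only the first token as valid.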
+    hidden_states = torch.tensor([[[1.0, 0.0, 0.0, 0.0], [0.5, 0.5, 0.5, 0.5]]])
+    attention_mask = torch.tensor([[True, False]])

+    output = attn(hidden_states=hidden_states, attention_mask=attention_mask)

-def test_longcat_audio_transformer_forward_shape():
-    model = LongCatAudioDiTTransformer(
-        dit_dim=64,
-        dit_depth=2,
-        dit_heads=4,
-        dit_text_dim=32,
-        latent_dim=8,
-        text_conv=False,
-    )
-    hidden_states = torch.randn(2, 16, 8)
-    encoder_hidden_states = torch.randn(2, 10, 32)
-    encoder_attention_mask = torch.ones(2, 10, dtype=torch.bool)
-    timestep = torch.tensor([1.0, 1.0])
-
-    output = model(
-        hidden_states=hidden_states,
-        encoder_hidden_states=encoder_hidden_states,
-        encoder_attention_mask=encoder_attention_mask,
-        timestep=timestep,
-    )
-
-    assert output.sample.shape == hidden_states.shape
-
-
-def test_longcat_audio_transformer_masked_forward():
-    model = LongCatAudioDiTTransformer(
-        dit_dim=64,
-        dit_depth=2,
-        dit_heads=4,
-        dit_text_dim=32,
-        latent_dim=8,
-        text_conv=False,
-    )
-    hidden_states = torch.randn(2, 16, 8)
-    encoder_hidden_states = torch.randn(2, 10, 32)
-    encoder_attention_mask = torch.tensor([[1] * 10, [1] * 6 + [0] * 4], dtype=torch.bool)
-    attention_mask = torch.tensor([[1] * 16, [1] * 9 + [0] * 7], dtype=torch.bool)
-    timestep = torch.tensor([1.0, 1.0])
-
-    output = model(
-        hidden_states=hidden_states,
-        encoder_hidden_states=encoder_hidden_states,
-        encoder_attention_mask=encoder_attention_mask,
-        timestep=timestep,
-        attention_mask=attention_mask,
-    )
-
-    assert output.sample.shape == hidden_states.shape
-    assert torch.all(output.sample[1, 9:] == 0)
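+    # With the second position masked out, its output row should come back as all zeros.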
+    assert torch.allclose(output[:, 1], torch.zeros_like(output[:, 1]))