@@ -25,7 +25,7 @@
 from ...loaders import FromOriginalModelMixin, PeftAdapterMixin
 from ...utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers
 from ...utils.torch_utils import maybe_allow_in_graph
-from ..attention import FeedForward
+from ..attention import AttentionMixin, FeedForward
 from ..attention_dispatch import dispatch_attention_fn
 from ..attention_processor import Attention
 from ..cache_utils import CacheMixin
@@ -470,7 +470,9 @@ def forward(
         return encoder_hidden_states, hidden_states


-class QwenImageTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin):
+class QwenImageTransformer2DModel(
+    ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin, CacheMixin, AttentionMixin
+):
     """
     The Transformer model introduced in Qwen.

|
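For reference, `AttentionMixin` is the diffusers mixin that exposes attention-processor utilities such as the `attn_processors` property and `set_attn_processor()`. A minimal sketch of what the new base class enables on this model; the checkpoint ID below is an illustrative assumption, not taken from this PR:

```python
# Sketch: exercising the AttentionMixin surface that this PR adds to
# QwenImageTransformer2DModel. The repo ID is illustrative only.
import torch

from diffusers import QwenImageTransformer2DModel

model = QwenImageTransformer2DModel.from_pretrained(
    "Qwen/Qwen-Image",  # illustrative checkpoint
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
)

# `attn_processors` (from AttentionMixin) returns a dict mapping each
# Attention module's qualified name to its processor instance.
for name, processor in model.attn_processors.items():
    print(name, type(processor).__name__)

# `set_attn_processor` accepts a single processor or a dict keyed like
# `attn_processors`; re-setting the current processors is a no-op here.
model.set_attn_processor(dict(model.attn_processors))
```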