
Commit bd58001

Run fix-copies to match code style
1 parent a1e7363 commit bd58001

Note: some files in this commit are hidden by default because the commit is large.

45 files changed: +11 -599 lines changed
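
The deletions in this commit are mechanical: diffusers marks duplicated code with "# Copied from <module.Class.method> with old->new" comments, and the fix-copies utility (run via `make fix-copies` in Hugging Face repositories, which invokes the repo's check-copies script) regenerates every marked copy from its source, applying the declared renames and rewriting any copy that has drifted out of sync. A minimal Python sketch of that idea follows; sync_copied_function is a hypothetical helper, not the real check-copies implementation.

# Hypothetical sketch of the "# Copied from ... with old->new" re-sync step.
import inspect
import re
import textwrap


def sync_copied_function(marker: str, source_fn, current_copy: str) -> str:
    """Return the text the marked copy should contain, derived from its source function."""
    # Canonical source of the function being copied.
    expected = textwrap.dedent(inspect.getsource(source_fn))

    # Apply every "old->new" rename declared after "with" in the marker comment.
    match = re.search(r"with (.+)$", marker)
    if match:
        for rule in match.group(1).split(","):
            old, new = (part.strip() for part in rule.split("->"))
            expected = expected.replace(old, new)

    if current_copy != expected:
        # fix-copies would overwrite the stale copy in the target file at this point.
        print("copy out of sync with its source; rewriting")
    return expected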

src/diffusers/loaders/lora_pipeline.py

Lines changed: 0 additions & 78 deletions

@@ -1463,18 +1463,6 @@ def load_lora_into_text_encoder(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.save_lora_weights with unet->transformer
    def save_lora_weights(
-        cls,
-        save_directory: Union[str, os.PathLike],
-        transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
-        text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
-        text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
-        is_main_process: bool = True,
-        weight_name: str = None,
-        save_function: Callable = None,
-        safe_serialization: bool = True,
-    ):
-        r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):

@@ -1525,15 +1513,6 @@ def save_lora_weights(

    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.fuse_lora with unet->transformer
    def fuse_lora(
-        self,
-        components: List[str] = ["transformer", "text_encoder", "text_encoder_2"],
-        lora_scale: float = 1.0,
-        safe_fusing: bool = False,
-        adapter_names: Optional[List[str]] = None,
-        **kwargs,
-    ):
-        r"""
-        Fuses the LoRA parameters into the original parameters of the corresponding blocks.

        <Tip warning={true}>

@@ -1573,9 +1552,6 @@ def fuse_lora(

    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionXLLoraLoaderMixin.unfuse_lora with unet->transformer
    def unfuse_lora(self, components: List[str] = ["transformer", "text_encoder", "text_encoder_2"], **kwargs):
-        r"""
-        Reverses the effect of
-        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

        <Tip warning={true}>

@@ -2083,17 +2059,6 @@ def load_lora_into_text_encoder(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.StableDiffusionLoraLoaderMixin.save_lora_weights with unet->transformer
    def save_lora_weights(
-        cls,
-        save_directory: Union[str, os.PathLike],
-        transformer_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
-        text_encoder_lora_layers: Dict[str, torch.nn.Module] = None,
-        is_main_process: bool = True,
-        weight_name: str = None,
-        save_function: Callable = None,
-        safe_serialization: bool = True,
-    ):
-        r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):

@@ -2496,17 +2461,6 @@ class AmusedLoraLoaderMixin(StableDiffusionLoraLoaderMixin):
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.FluxLoraLoaderMixin.load_lora_into_transformer with FluxTransformer2DModel->UVit2DModel
    def load_lora_into_transformer(
-        cls,
-        state_dict,
-        network_alphas,
-        transformer,
-        adapter_name=None,
-        _pipeline=None,
-        low_cpu_mem_usage=False,
-        hotswap: bool = False,
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -2854,10 +2808,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->CogVideoXTransformer3DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -3185,10 +3135,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->MochiTransformer3DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -3518,10 +3464,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->LTXVideoTransformer3DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -3851,10 +3793,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->SanaTransformer2DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -4187,10 +4125,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->HunyuanVideoTransformer3DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -4524,10 +4458,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->Lumina2Transformer2DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -4890,10 +4820,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->WanTransformer3DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):

@@ -5223,10 +5149,6 @@ def load_lora_weights(
    @classmethod
    # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->CogView4Transformer2DModel
    def load_lora_into_transformer(
-        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
-    ):
-        """
-        This will load the LoRA layers specified in `state_dict` into `transformer`.

        Parameters:
            state_dict (`dict`):
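
The lora_pipeline.py hunks above all touch the per-model LoRA loader mixins (save_lora_weights, fuse_lora, unfuse_lora, load_lora_into_transformer). For orientation, a hedged usage sketch of the public surface those mixins back, assuming the loaded pipeline class includes one of them; the model and LoRA repo ids are placeholders.

# Hypothetical usage sketch; "some/base-model" and "some/lora-repo" are placeholders.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("some/base-model", torch_dtype=torch.float16)
pipe.load_lora_weights("some/lora-repo")  # routes into load_lora_into_transformer / load_lora_into_text_encoder
pipe.fuse_lora(lora_scale=1.0)            # fold the LoRA deltas into the base weights for inference
image = pipe("an astronaut riding a horse").images[0]
pipe.unfuse_lora()                        # restore the original, unfused weights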

src/diffusers/models/controlnets/multicontrolnet_union.py

Lines changed: 0 additions & 12 deletions

@@ -85,16 +85,6 @@ def forward(

    # Copied from diffusers.models.controlnets.multicontrolnet.MultiControlNetModel.save_pretrained with ControlNet->ControlNetUnion
    def save_pretrained(
-        self,
-        save_directory: Union[str, os.PathLike],
-        is_main_process: bool = True,
-        save_function: Callable = None,
-        safe_serialization: bool = True,
-        variant: Optional[str] = None,
-    ):
-        """
-        Save a model and its configuration file to a directory, so that it can be re-loaded using the
-        `[`~models.controlnets.multicontrolnet.MultiControlNetUnionModel.from_pretrained`]` class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):

@@ -125,8 +115,6 @@ def save_pretrained(
    @classmethod
    # Copied from diffusers.models.controlnets.multicontrolnet.MultiControlNetModel.from_pretrained with ControlNet->ControlNetUnion
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
-        r"""
-        Instantiate a pretrained MultiControlNetUnion model from multiple pre-trained controlnet models.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you should first set it back in training mode with `model.train()`.

src/diffusers/models/transformers/auraflow_transformer_2d.py

Lines changed: 0 additions & 3 deletions

@@ -406,9 +406,6 @@ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedAuraFlowAttnProcessor2_0
    def fuse_qkv_projections(self):
-        """
-        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
-        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>
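
The same three docstring lines are dropped from fuse_qkv_projections in the AuraFlow hunk above and in the CogVideoX, HunyuanDiT, Flux, and SD3 transformer files below, each copied from UNet2DConditionModel with its own fused attention processor class. A hedged sketch of how that toggle is typically used, assuming the checkpoint path is a placeholder and that the model also exposes a matching unfuse_qkv_projections method:

# Hypothetical sketch: toggling fused QKV projections on one of the transformer models above.
from diffusers import FluxTransformer2DModel

model = FluxTransformer2DModel.from_pretrained("some/flux-checkpoint", subfolder="transformer")
model.fuse_qkv_projections()    # fuse query/key/value projections (marked experimental in the docstring)
# ... run the denoising loop with the fused processors ...
model.unfuse_qkv_projections()  # assumed counterpart that restores the unfused processors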

src/diffusers/models/transformers/cogvideox_transformer_3d.py

Lines changed: 0 additions & 3 deletions

@@ -393,9 +393,6 @@ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0
    def fuse_qkv_projections(self):
-        """
-        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
-        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

src/diffusers/models/transformers/hunyuan_transformer_2d.py

Lines changed: 0 additions & 3 deletions

@@ -320,9 +320,6 @@ def __init__(

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedHunyuanAttnProcessor2_0
    def fuse_qkv_projections(self):
-        """
-        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
-        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

src/diffusers/models/transformers/stable_audio_transformer.py

Lines changed: 1 addition & 1 deletion

@@ -341,7 +341,7 @@ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):

    # Copied from diffusers.models.transformers.hunyuan_transformer_2d.HunyuanDiT2DModel.set_default_attn_processor with Hunyuan->StableAudio
    def set_default_attn_processor(self):
-        """
+        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(StableAudioAttnProcessor2_0())

src/diffusers/models/transformers/transformer_flux.py

Lines changed: 0 additions & 3 deletions

@@ -348,9 +348,6 @@ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0
    def fuse_qkv_projections(self):
-        """
-        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
-        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

src/diffusers/models/transformers/transformer_sd3.py

Lines changed: 0 additions & 3 deletions

@@ -276,9 +276,6 @@ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedJointAttnProcessor2_0
    def fuse_qkv_projections(self):
-        """
-        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
-        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

src/diffusers/pipelines/allegro/pipeline_allegro.py

Lines changed: 0 additions & 16 deletions

@@ -213,22 +213,6 @@ def __init__(

    # Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.encode_prompt with 120->512, num_images_per_prompt->num_videos_per_prompt
    def encode_prompt(
-        self,
-        prompt: Union[str, List[str]],
-        do_classifier_free_guidance: bool = True,
-        negative_prompt: str = "",
-        num_videos_per_prompt: int = 1,
-        device: Optional[torch.device] = None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        prompt_attention_mask: Optional[torch.Tensor] = None,
-        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
-        clean_caption: bool = False,
-        max_sequence_length: int = 512,
-        **kwargs,
-    ):
-        r"""
-        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):

src/diffusers/pipelines/animatediff/pipeline_animatediff.py

Lines changed: 0 additions & 13 deletions

@@ -154,19 +154,6 @@ def __init__(

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
    def encode_prompt(
-        self,
-        prompt,
-        device,
-        num_images_per_prompt,
-        do_classifier_free_guidance,
-        negative_prompt=None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        lora_scale: Optional[float] = None,
-        clip_skip: Optional[int] = None,
-    ):
-        r"""
-        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
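
The encode_prompt hunks in the Allegro file above and the AnimateDiff variants around it all trim the same copied signature and docstring opening. As a hedged call sketch against the signature shown in the hunk above, assuming `pipe` is an already constructed AnimateDiff pipeline instance and the prompt strings are placeholders:

# Hypothetical call sketch; `pipe` is assumed to be an AnimateDiffPipeline created elsewhere.
prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
    prompt="a rocket lifting off at dawn",
    device=pipe.device,
    num_images_per_prompt=1,
    do_classifier_free_guidance=True,
    negative_prompt="low quality, blurry",
)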

src/diffusers/pipelines/animatediff/pipeline_animatediff_controlnet.py

Lines changed: 0 additions & 13 deletions

@@ -197,19 +197,6 @@ def __init__(

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
    def encode_prompt(
-        self,
-        prompt,
-        device,
-        num_images_per_prompt,
-        do_classifier_free_guidance,
-        negative_prompt=None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        lora_scale: Optional[float] = None,
-        clip_skip: Optional[int] = None,
-    ):
-        r"""
-        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):

src/diffusers/pipelines/animatediff/pipeline_animatediff_sdxl.py

Lines changed: 0 additions & 17 deletions

@@ -327,23 +327,6 @@ def __init__(

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.encode_prompt with num_images_per_prompt->num_videos_per_prompt
    def encode_prompt(
-        self,
-        prompt: str,
-        prompt_2: Optional[str] = None,
-        device: Optional[torch.device] = None,
-        num_videos_per_prompt: int = 1,
-        do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        lora_scale: Optional[float] = None,
-        clip_skip: Optional[int] = None,
-    ):
-        r"""
-        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):

src/diffusers/pipelines/animatediff/pipeline_animatediff_sparsectrl.py

Lines changed: 0 additions & 13 deletions

@@ -206,19 +206,6 @@ def __init__(

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
    def encode_prompt(
-        self,
-        prompt,
-        device,
-        num_images_per_prompt,
-        do_classifier_free_guidance,
-        negative_prompt=None,
-        prompt_embeds: Optional[torch.Tensor] = None,
-        negative_prompt_embeds: Optional[torch.Tensor] = None,
-        lora_scale: Optional[float] = None,
-        clip_skip: Optional[int] = None,
-    ):
-        r"""
-        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):

src/diffusers/pipelines/audioldm/pipeline_audioldm.py

Lines changed: 1 addition & 1 deletion

@@ -335,7 +335,7 @@ def check_inputs(

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim
    def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None):
-        shape = (
+        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,

src/diffusers/pipelines/audioldm2/pipeline_audioldm2.py

Lines changed: 1 addition & 1 deletion

@@ -815,7 +815,7 @@ def check_inputs(

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim
    def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None):
-        shape = (
+        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,

src/diffusers/pipelines/cogview3/pipeline_cogview3plus.py

Lines changed: 1 addition & 1 deletion

@@ -166,7 +166,7 @@ def __init__(

    # Copied from diffusers.pipelines.cogvideo.pipeline_cogvideox.CogVideoXPipeline._get_t5_prompt_embeds with num_videos_per_prompt->num_images_per_prompt
    def _get_t5_prompt_embeds(
-        self,
+        self,
        prompt: Union[str, List[str]] = None,
        num_images_per_prompt: int = 1,
        max_sequence_length: int = 226,

src/diffusers/pipelines/deepfloyd_if/pipeline_if_img2img_superresolution.py

Lines changed: 1 addition & 1 deletion

@@ -639,7 +639,7 @@ def check_inputs(

    # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if_img2img.IFImg2ImgPipeline.preprocess_image with preprocess_image -> preprocess_original_image
    def preprocess_original_image(self, image: PIL.Image.Image) -> torch.Tensor:
-        if not isinstance(image, list):
+        if not isinstance(image, list):
            image = [image]

        def numpy_to_pt(images):
