From b3e04cc1c051b9d7269c93f8d15f41c09dd1ed01 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Tue, 11 Feb 2025 09:59:19 +0100
Subject: [PATCH] feat(diffusers): add support for Lumina2Text2ImgPipeline

Signed-off-by: Ettore Di Giacinto
---
 backend/python/diffusers/backend.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/backend/python/diffusers/backend.py b/backend/python/diffusers/backend.py
index c9aa02bcf498..25c0a7ae5b8f 100755
--- a/backend/python/diffusers/backend.py
+++ b/backend/python/diffusers/backend.py
@@ -19,7 +19,7 @@
 
 from diffusers import SanaPipeline, StableDiffusion3Pipeline, StableDiffusionXLPipeline, StableDiffusionDepth2ImgPipeline, DPMSolverMultistepScheduler, StableDiffusionPipeline, DiffusionPipeline, \
     EulerAncestralDiscreteScheduler, FluxPipeline, FluxTransformer2DModel
-from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline
+from diffusers import StableDiffusionImg2ImgPipeline, AutoPipelineForText2Image, ControlNetModel, StableVideoDiffusionPipeline, Lumina2Text2ImgPipeline
 from diffusers.pipelines.stable_diffusion import safety_checker
 from diffusers.utils import load_image, export_to_video
 from compel import Compel, ReturnedEmbeddingsType
@@ -275,6 +275,12 @@ def LoadModel(self, request, context):
 
             if request.LowVRAM:
                 self.pipe.enable_model_cpu_offload()
+        elif request.PipelineType == "Lumina2Text2ImgPipeline":
+            self.pipe = Lumina2Text2ImgPipeline.from_pretrained(
+                request.Model,
+                torch_dtype=torch.bfloat16)
+            if request.LowVRAM:
+                self.pipe.enable_model_cpu_offload()
         elif request.PipelineType == "SanaPipeline":
             self.pipe = SanaPipeline.from_pretrained(
                 request.Model,
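
Note for reviewers: a minimal standalone sketch of how the pipeline wired up
above is exercised through the diffusers API. The checkpoint ID
"Alpha-VLLM/Lumina-Image-2.0" and the generation parameters are illustrative
assumptions, not part of this patch; enable_model_cpu_offload() additionally
requires the accelerate package to be installed.

    # Sketch only: checkpoint ID and parameters are assumptions, not from this patch.
    import torch
    from diffusers import Lumina2Text2ImgPipeline

    pipe = Lumina2Text2ImgPipeline.from_pretrained(
        "Alpha-VLLM/Lumina-Image-2.0",  # assumed reference checkpoint
        torch_dtype=torch.bfloat16)     # same dtype the patch uses
    pipe.enable_model_cpu_offload()     # mirrors the LowVRAM branch above

    image = pipe(
        prompt="a photo of a cat wearing a spacesuit",
        num_inference_steps=30,
    ).images[0]
    image.save("lumina2.png")

In the backend itself, the new branch is selected by setting the model's
PipelineType option to "Lumina2Text2ImgPipeline", following the same pattern
as the existing SanaPipeline branch.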