import inspect
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import DiffusionPipeline, StableDiffusionXLPipeline
from diffusers.image_processor import VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.models.attention_processor import (
    AttnProcessor2_0,
    LoRAAttnProcessor2_0,
    LoRAXFormersAttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    is_accelerate_available,
    is_accelerate_version,
    is_invisible_watermark_available,
    logging,
    replace_example_docstring,
)
from diffusers.utils.torch_utils import randn_tensor


if is_invisible_watermark_available():
    from diffusers.pipelines.stable_diffusion_xl.watermark import StableDiffusionXLWatermarker


def parse_prompt_attention(text):
    """
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text

    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """
    import re

    re_attention = re.compile(
        r"""
            \\\(|\\\)|\\\[|\\]|\\\\|\\|\(|\[|:([+-]?[.\d]+)\)|
            \)|]|[^\\()\[\]:]+|:
        """,
        re.X,
    )

    re_break = re.compile(r"\s*\bBREAK\b\s*", re.S)

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            # escaped bracket: emit the literal character without the backslash
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            # explicit weight, e.g. "(abc:1.5)": apply it to everything since the matching "("
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            parts = re.split(re_break, text)
            for i, part in enumerate(parts):
                if i > 0:
                    res.append(["BREAK", -1])
                res.append([part, 1.0])

    # close any unbalanced brackets by applying their multiplier to the rest of the prompt
    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge consecutive segments that ended up with the same weight
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res
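
# Worked example of the weight arithmetic, taken from the doctest above: in
# "a (((house:1.3)) ...", "house" receives 1.3 from ":1.3)", 1.1 from the one
# explicit ")", and another 1.1 when the remaining unbalanced "(" is closed at
# the end of parsing, giving 1.3 * 1.1 * 1.1 ≈ 1.573.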


def get_prompts_tokens_with_weights(clip_tokenizer: CLIPTokenizer, prompt: str):
    """
    Get prompt token ids and weights. This function works for both prompt and negative prompt.

    Args:
        clip_tokenizer (CLIPTokenizer)
            A CLIPTokenizer
        prompt (str)
            A prompt string with weights

    Returns:
        text_tokens (list)
            A list containing token ids
        text_weights (list)
            A list containing the corresponding weight of each token id

    Example:
        import torch
        from transformers import CLIPTokenizer

        clip_tokenizer = CLIPTokenizer.from_pretrained(
            "stablediffusionapi/deliberate-v2"
            , subfolder = "tokenizer"
            , dtype = torch.float16
        )

        token_id_list, token_weight_list = get_prompts_tokens_with_weights(
            clip_tokenizer = clip_tokenizer
            ,prompt = "a (red:1.5) cat"*70
        )
    """
    texts_and_weights = parse_prompt_attention(prompt)
    text_tokens, text_weights = [], []
    for word, weight in texts_and_weights:
        # tokenize, discarding the starting and ending special tokens
        token = clip_tokenizer(word, truncation=False).input_ids[1:-1]
        # merge the new tokens into the running token list
        text_tokens = [*text_tokens, *token]
        # a segment carries a single weight, e.g. ["red cat", 1.5];
        # expand that weight so every token in the segment gets it
        chunk_weights = [weight] * len(token)
        # merge the expanded weights into the running weight list
        text_weights = [*text_weights, *chunk_weights]
    return text_tokens, text_weights
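
# Illustrative only (the token ids below are made up): for prompt = "a (red:1.5) cat"
# this returns something like text_tokens = [320, 736, 2368] with
# text_weights = [1.0, 1.5, 1.0] -- one weight per token, in prompt order.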


def group_tokens_and_weights(token_ids: list, weights: list, pad_last_block=False):
    """
    Produce tokens and weights in groups and pad the missing tokens.

    Args:
        token_ids (list)
            The token ids from the tokenizer
        weights (list)
            The weights list from the function get_prompts_tokens_with_weights
        pad_last_block (bool)
            Whether to pad the last token group to 75 tokens with eos
    Returns:
        new_token_ids (2d list)
        new_weights (2d list)

    Example:
        token_groups, weight_groups = group_tokens_and_weights(
            token_ids = token_id_list
            , weights = token_weight_list
        )
    """
    bos, eos = 49406, 49407

    # these will be 2d lists: one row per 77-token chunk
    new_token_ids = []
    new_weights = []
    while len(token_ids) >= 75:
        # take the first 75 tokens and their weights
        head_75_tokens = [token_ids.pop(0) for _ in range(75)]
        head_75_weights = [weights.pop(0) for _ in range(75)]

        # wrap each chunk with bos/eos and matching unit weights
        temp_77_token_ids = [bos] + head_75_tokens + [eos]
        temp_77_weights = [1.0] + head_75_weights + [1.0]

        # add the 77-token chunk and its weights to the holders
        new_token_ids.append(temp_77_token_ids)
        new_weights.append(temp_77_weights)

    # handle the left-over tokens, optionally padding with eos
    if len(token_ids) > 0:
        padding_len = 75 - len(token_ids) if pad_last_block else 0

        temp_77_token_ids = [bos] + token_ids + [eos] * padding_len + [eos]
        new_token_ids.append(temp_77_token_ids)

        temp_77_weights = [1.0] + weights + [1.0] * padding_len + [1.0]
        new_weights.append(temp_77_weights)

    return new_token_ids, new_weights
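
# Sketch of the chunking behaviour: 120 token ids come back as two groups, first
# bos + 75 ids + eos (77 total), then bos + the remaining 45 ids + eos (padded to
# 77 only when pad_last_block=True). Arbitrarily long prompts are thus encoded 77
# tokens at a time and the per-chunk embeddings concatenated afterwards.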


def get_weighted_text_embeddings_sdxl(
    pipe: StableDiffusionXLPipeline,
    prompt: str = "",
    prompt_2: str = None,
    neg_prompt: str = "",
    neg_prompt_2: str = None,
):
    """
    This function processes long prompts with weights for Stable Diffusion XL,
    with no length limitation.

    Args:
        pipe (StableDiffusionXLPipeline)
        prompt (str)
        prompt_2 (str)
        neg_prompt (str)
        neg_prompt_2 (str)
    Returns:
        prompt_embeds (torch.Tensor)
        neg_prompt_embeds (torch.Tensor)
        pooled_prompt_embeds (torch.Tensor)
        negative_pooled_prompt_embeds (torch.Tensor)
    """
    if prompt_2:
        prompt = f"{prompt} {prompt_2}"

    if neg_prompt_2:
        neg_prompt = f"{neg_prompt} {neg_prompt_2}"

    eos = pipe.tokenizer.eos_token_id

    # tokenizer 1
    prompt_tokens, prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, prompt)

    neg_prompt_tokens, neg_prompt_weights = get_prompts_tokens_with_weights(pipe.tokenizer, neg_prompt)

    # tokenizer 2
    prompt_tokens_2, prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, prompt)

    neg_prompt_tokens_2, neg_prompt_weights_2 = get_prompts_tokens_with_weights(pipe.tokenizer_2, neg_prompt)

    # pad the shorter of prompt / negative prompt to the same length (tokenizer 1)
    prompt_token_len = len(prompt_tokens)
    neg_prompt_token_len = len(neg_prompt_tokens)

    if prompt_token_len > neg_prompt_token_len:
        # pad the negative prompt with eos tokens
        neg_prompt_tokens = neg_prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
        neg_prompt_weights = neg_prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)
    else:
        # pad the prompt
        prompt_tokens = prompt_tokens + [eos] * abs(prompt_token_len - neg_prompt_token_len)
        prompt_weights = prompt_weights + [1.0] * abs(prompt_token_len - neg_prompt_token_len)

    # pad the shorter of prompt / negative prompt to the same length (tokenizer 2)
    prompt_token_len_2 = len(prompt_tokens_2)
    neg_prompt_token_len_2 = len(neg_prompt_tokens_2)

    if prompt_token_len_2 > neg_prompt_token_len_2:
        # pad the negative prompt with eos tokens
        neg_prompt_tokens_2 = neg_prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
        neg_prompt_weights_2 = neg_prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
    else:
        # pad the prompt
        prompt_tokens_2 = prompt_tokens_2 + [eos] * abs(prompt_token_len_2 - neg_prompt_token_len_2)
        prompt_weights_2 = prompt_weights_2 + [1.0] * abs(prompt_token_len_2 - neg_prompt_token_len_2)

    embeds = []
    neg_embeds = []

    prompt_token_groups, prompt_weight_groups = group_tokens_and_weights(prompt_tokens.copy(), prompt_weights.copy())

    neg_prompt_token_groups, neg_prompt_weight_groups = group_tokens_and_weights(
        neg_prompt_tokens.copy(), neg_prompt_weights.copy()
    )

    prompt_token_groups_2, prompt_weight_groups_2 = group_tokens_and_weights(
        prompt_tokens_2.copy(), prompt_weights_2.copy()
    )

    neg_prompt_token_groups_2, neg_prompt_weight_groups_2 = group_tokens_and_weights(
        neg_prompt_tokens_2.copy(), neg_prompt_weights_2.copy()
    )

    # encode the prompts one 77-token chunk at a time
    for i in range(len(prompt_token_groups)):
        # get positive prompt embeddings with weights
        token_tensor = torch.tensor([prompt_token_groups[i]], dtype=torch.long, device=pipe.device)
        weight_tensor = torch.tensor(prompt_weight_groups[i], dtype=torch.float16, device=pipe.device)

        token_tensor_2 = torch.tensor([prompt_token_groups_2[i]], dtype=torch.long, device=pipe.device)

        # use the first text encoder
        prompt_embeds_1 = pipe.text_encoder(token_tensor.to(pipe.device), output_hidden_states=True)
        prompt_embeds_1_hidden_states = prompt_embeds_1.hidden_states[-2]

        # use the second text encoder
        prompt_embeds_2 = pipe.text_encoder_2(token_tensor_2.to(pipe.device), output_hidden_states=True)
        prompt_embeds_2_hidden_states = prompt_embeds_2.hidden_states[-2]
        pooled_prompt_embeds = prompt_embeds_2[0]

        prompt_embeds_list = [prompt_embeds_1_hidden_states, prompt_embeds_2_hidden_states]
        token_embedding = torch.concat(prompt_embeds_list, dim=-1).squeeze(0)

        for j in range(len(weight_tensor)):
            if weight_tensor[j] != 1.0:
                # interpolate between the eos embedding and the token embedding by the weight
                token_embedding[j] = (
                    token_embedding[-1] + (token_embedding[j] - token_embedding[-1]) * weight_tensor[j]
                )

        token_embedding = token_embedding.unsqueeze(0)
        embeds.append(token_embedding)

        # get negative prompt embeddings with weights
        neg_token_tensor = torch.tensor([neg_prompt_token_groups[i]], dtype=torch.long, device=pipe.device)
        neg_token_tensor_2 = torch.tensor([neg_prompt_token_groups_2[i]], dtype=torch.long, device=pipe.device)
        neg_weight_tensor = torch.tensor(neg_prompt_weight_groups[i], dtype=torch.float16, device=pipe.device)

        # use the first text encoder
        neg_prompt_embeds_1 = pipe.text_encoder(neg_token_tensor.to(pipe.device), output_hidden_states=True)
        neg_prompt_embeds_1_hidden_states = neg_prompt_embeds_1.hidden_states[-2]

        # use the second text encoder
        neg_prompt_embeds_2 = pipe.text_encoder_2(neg_token_tensor_2.to(pipe.device), output_hidden_states=True)
        neg_prompt_embeds_2_hidden_states = neg_prompt_embeds_2.hidden_states[-2]
        negative_pooled_prompt_embeds = neg_prompt_embeds_2[0]

        neg_prompt_embeds_list = [neg_prompt_embeds_1_hidden_states, neg_prompt_embeds_2_hidden_states]
        neg_token_embedding = torch.concat(neg_prompt_embeds_list, dim=-1).squeeze(0)

        for z in range(len(neg_weight_tensor)):
            if neg_weight_tensor[z] != 1.0:
                neg_token_embedding[z] = (
                    neg_token_embedding[-1] + (neg_token_embedding[z] - neg_token_embedding[-1]) * neg_weight_tensor[z]
                )

        neg_token_embedding = neg_token_embedding.unsqueeze(0)
        neg_embeds.append(neg_token_embedding)

    prompt_embeds = torch.cat(embeds, dim=1)
    negative_prompt_embeds = torch.cat(neg_embeds, dim=1)

    return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
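
# Minimal usage sketch (assumes `pipe` is an already-loaded StableDiffusionXLPipeline):
#   pe, npe, pooled, neg_pooled = get_weighted_text_embeddings_sdxl(
#       pipe, prompt="a (white:1.3) cat" * 40, neg_prompt="blur, (low quality:1.5)"
#   )
#   images = pipe(prompt_embeds=pe, negative_prompt_embeds=npe,
#                 pooled_prompt_embeds=pooled, negative_pooled_prompt_embeds=neg_pooled).images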


# -------------------------------------------------------------------------------
# The remainder of this file follows the standard StableDiffusionXLPipeline code,
# extended to use the weighted long-prompt embedding helpers defined above.
# -------------------------------------------------------------------------------

logger = logging.get_logger(__name__)

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        from diffusers import DiffusionPipeline
        import torch

        pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0"
            , torch_dtype = torch.float16
            , use_safetensors = True
            , variant = "fp16"
            , custom_pipeline = "lpw_stable_diffusion_xl",
        )

        prompt = "a white cat running on the grass"*20
        prompt2 = "play a football"*20
        prompt = f"{prompt},{prompt2}"
        neg_prompt = "blur, low quality"

        pipe.to("cuda")
        images = pipe(
            prompt = prompt
            , negative_prompt = neg_prompt
        ).images[0]

        pipe.to("cpu")
        torch.cuda.empty_cache()
        images
        ```
"""


def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg
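
# In equation form (phi = guidance_rescale):
#   x_rescaled = x_cfg * std(x_text) / std(x_cfg)
#   x_final    = phi * x_rescaled + (1 - phi) * x_cfg
# so phi = 0 leaves classifier-free guidance untouched and phi = 1 fully matches
# the per-sample standard deviation of the text-conditioned prediction.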


class SDXLLongPromptWeightingPipeline(DiffusionPipeline, FromSingleFileMixin, LoraLoaderMixin):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion XL.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    In addition the pipeline inherits the following loading methods:
        - *LoRA*: [`StableDiffusionXLPipeline.load_lora_weights`]
        - *Ckpt*: [`loaders.FromSingleFileMixin.from_single_file`]

    as well as the following saving methods:
        - *LoRA*: [`loaders.StableDiffusionXLPipeline.save_lora_weights`]

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion XL uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        text_encoder_2 ([`CLIPTextModelWithProjection`]):
            Second frozen text-encoder. Stable Diffusion XL uses the text and pool portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
            specifically the
            [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
            variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        tokenizer_2 (`CLIPTokenizer`):
            Second Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        text_encoder_2: CLIPTextModelWithProjection,
        tokenizer: CLIPTokenizer,
        tokenizer_2: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        force_zeros_for_empty_prompt: bool = True,
        add_watermarker: Optional[bool] = None,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            text_encoder_2=text_encoder_2,
            tokenizer=tokenizer,
            tokenizer_2=tokenizer_2,
            unet=unet,
            scheduler=scheduler,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.default_sample_size = self.unet.config.sample_size

        add_watermarker = add_watermarker if add_watermarker is not None else is_invisible_watermark_available()

        if add_watermarker:
            self.watermark = StableDiffusionXLWatermarker()
        else:
            self.watermark = None

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()

        model_sequence = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )
        model_sequence.extend([self.unet, self.vae])

        hook = None
        for cpu_offloaded_model in model_sequence:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # we'll offload the last model manually
        self.final_offload_hook = hook

    def encode_prompt(
        self,
        prompt: str,
        prompt_2: Optional[str] = None,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            lora_scale (`float`, *optional*):
                A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
        """
        device = device or self._execution_device

        # set the lora scale so that the monkey-patched LoRA layers of the text encoders can access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # define tokenizers and text encoders
        tokenizers = [self.tokenizer, self.tokenizer_2] if self.tokenizer is not None else [self.tokenizer_2]
        text_encoders = (
            [self.text_encoder, self.text_encoder_2] if self.text_encoder is not None else [self.text_encoder_2]
        )

        if prompt_embeds is None:
            prompt_2 = prompt_2 or prompt

            prompt_embeds_list = []
            prompts = [prompt, prompt_2]
            for prompt, tokenizer, text_encoder in zip(prompts, tokenizers, text_encoders):
                # textual inversion: process multi-vector tokens if necessary
                if isinstance(self, TextualInversionLoaderMixin):
                    prompt = self.maybe_convert_prompt(prompt, tokenizer)

                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=tokenizer.model_max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                text_input_ids = text_inputs.input_ids
                untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

                if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                    text_input_ids, untruncated_ids
                ):
                    removed_text = tokenizer.batch_decode(untruncated_ids[:, tokenizer.model_max_length - 1 : -1])
                    logger.warning(
                        "The following part of your input was truncated because CLIP can only handle sequences up to"
                        f" {tokenizer.model_max_length} tokens: {removed_text}"
                    )

                prompt_embeds = text_encoder(
                    text_input_ids.to(device),
                    output_hidden_states=True,
                )

                # we are only interested in the pooled output of the final text encoder
                pooled_prompt_embeds = prompt_embeds[0]
                prompt_embeds = prompt_embeds.hidden_states[-2]

                prompt_embeds_list.append(prompt_embeds)

            prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)

        # get unconditional embeddings for classifier-free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt
        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
            negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt_2 = negative_prompt_2 or negative_prompt

            uncond_tokens: List[str]
            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt, negative_prompt_2]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = [negative_prompt, negative_prompt_2]

            negative_prompt_embeds_list = []
            for negative_prompt, tokenizer, text_encoder in zip(uncond_tokens, tokenizers, text_encoders):
                # textual inversion: process multi-vector tokens if necessary
                if isinstance(self, TextualInversionLoaderMixin):
                    negative_prompt = self.maybe_convert_prompt(negative_prompt, tokenizer)

                max_length = prompt_embeds.shape[1]
                uncond_input = tokenizer(
                    negative_prompt,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )

                negative_prompt_embeds = text_encoder(
                    uncond_input.input_ids.to(device),
                    output_hidden_states=True,
                )
                # we are only interested in the pooled output of the final text encoder
                negative_pooled_prompt_embeds = negative_prompt_embeds[0]
                negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)

        prompt_embeds = prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using an mps-friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using an mps-friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder_2.dtype, device=device)
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )
        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper (https://arxiv.org/abs/2010.02502)
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts a generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs
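
    # For example, with DDIMScheduler this returns {"eta": eta, "generator": generator},
    # while a scheduler whose `step()` takes neither argument gets an empty dict.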

    def check_inputs(
        self,
        prompt,
        prompt_2,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        negative_prompt_2=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt_2 is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
            raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )
        elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    def _get_add_time_ids(self, original_size, crops_coords_top_left, target_size, dtype):
        add_time_ids = list(original_size + crops_coords_top_left + target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + self.text_encoder_2.config.projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        return add_time_ids
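
    # For the default SDXL setup, original_size=(1024, 1024), crops_coords_top_left=(0, 0)
    # and target_size=(1024, 1024) produce the six micro-conditioning ids
    # [1024, 1024, 0, 0, 1024, 1024], each embedded like a timestep and combined with
    # the pooled text embedding inside the UNet's `add_embedding`.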

    def upcast_vae(self):
        dtype = self.vae.dtype
        self.vae.to(dtype=torch.float32)
        use_torch_2_0_or_xformers = isinstance(
            self.vae.decoder.mid_block.attentions[0].processor,
            (
                AttnProcessor2_0,
                XFormersAttnProcessor,
                LoRAXFormersAttnProcessor,
                LoRAAttnProcessor2_0,
            ),
        )
        # if xformers or torch 2.0 attention is used, the attention blocks do not need
        # to run in float32, which saves a lot of memory
        if use_torch_2_0_or_xformers:
            self.vae.post_quant_conv.to(dtype)
            self.vae.decoder.conv_in.to(dtype)
            self.vae.decoder.mid_block.to(dtype)

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: str = None,
        prompt_2: Optional[str] = None,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[str] = None,
        negative_prompt_2: Optional[str] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str`):
                The prompt to guide the image generation. If not defined, one has to pass `prompt_embeds` instead.
            prompt_2 (`str`):
                The prompt to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
                used in both text-encoders
            height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            denoising_end (`float`, *optional*):
                When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be
                completed before it is intentionally prematurely terminated. As a result, the returned sample will
                still retain a substantial amount of noise as determined by the discrete timesteps selected by the
                scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a
                "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image
                Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output)
            guidance_scale (`float`, *optional*, defaults to 5.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str`):
                The prompt not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            negative_prompt_2 (`str`):
                The prompt not to guide the image generation to be sent to `tokenizer_2` and
                `text_encoder_2`. If not defined, `negative_prompt` is used in both text-encoders
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] instead
                of a plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). `guidance_rescale` is defined as `φ` in equation 16. of
                [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf).
                Guidance rescale factor should fix overexposure when using zero terminal SNR.
            original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled.
                `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as
                explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)):
                `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position
                `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting
                `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of
                [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).
            target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)):
                For most cases, `target_size` should be set to the desired height and width of the generated image. If
                not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in
                section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952).

        Examples:

        Returns:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion_xl.StableDiffusionXLPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images.
        """
        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            prompt_2,
            height,
            width,
            callback_steps,
            negative_prompt,
            negative_prompt_2,
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        )

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf .
        # `guidance_scale = 1` corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        negative_prompt = negative_prompt if negative_prompt is not None else ""

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = get_weighted_text_embeddings_sdxl(pipe=self, prompt=prompt, neg_prompt=negative_prompt)

        # 4. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)

        timesteps = self.scheduler.timesteps

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 7. Prepare added time ids & embeddings
        add_text_embeds = pooled_prompt_embeds
        add_time_ids = self._get_add_time_ids(
            original_size, crops_coords_top_left, target_size, dtype=prompt_embeds.dtype
        )

        if do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
            add_time_ids = torch.cat([add_time_ids, add_time_ids], dim=0)

        prompt_embeds = prompt_embeds.to(device)
        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)

        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        # 8.1 Apply denoising_end
        if denoising_end is not None and isinstance(denoising_end, float) and denoising_end > 0 and denoising_end < 1:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_end * self.scheduler.config.num_train_timesteps)
                )
            )
            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
            timesteps = timesteps[:num_inference_steps]

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                if do_classifier_free_guidance and guidance_rescale > 0.0:
                    # based on section 3.4 of https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if not output_type == "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)

            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents
            return StableDiffusionXLPipelineOutput(images=image)

        # apply watermark if available
        if self.watermark is not None:
            image = self.watermark.apply_watermark(image)

        image = self.image_processor.postprocess(image, output_type=output_type)

        # offload all models
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (image,)

        return StableDiffusionXLPipelineOutput(images=image)

    # overridden to properly handle the loading and unloading of the additional text encoder
    def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        # pass the unet config so unet-specific LoRA keys can be resolved against it
        state_dict, network_alphas = self.lora_state_dict(
            pretrained_model_name_or_path_or_dict,
            unet_config=self.unet.config,
            **kwargs,
        )
        self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet)

        text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
        if len(text_encoder_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder,
                prefix="text_encoder",
                lora_scale=self.lora_scale,
            )

        text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
        if len(text_encoder_2_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_2_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder_2,
                prefix="text_encoder_2",
                lora_scale=self.lora_scale,
            )

    @classmethod
    def save_lora_weights(
        self,
        save_directory: Union[str, os.PathLike],
        unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = False,
    ):
        state_dict = {}

        def pack_weights(layers, prefix):
            layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
            layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
            return layers_state_dict

        state_dict.update(pack_weights(unet_lora_layers, "unet"))

        if text_encoder_lora_layers and text_encoder_2_lora_layers:
            state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
            state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))

        self.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
        )

    def _remove_text_encoder_monkey_patch(self):
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)