import torch
import spaces
import gradio as gr
import random
import numpy as np
from diffusers import ZImagePipeline
# Load the pipeline once at startup
print("Loading Z-Image-Turbo pipeline...")
pipe = ZImagePipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,
)
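# A second, identical pipeline is kept LoRA-free so the demo can compare outputs side by side.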
pipe_no_lora = ZImagePipeline.from_pretrained(
    "Tongyi-MAI/Z-Image-Turbo",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,
)
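# Load the AWPortrait-Z LoRA, fuse it into the transformer weights at a 0.9 scale,
# then unload the adapter so inference carries no runtime LoRA overhead.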
pipe.load_lora_weights("Shakker-Labs/AWPortrait-Z", weight_name="AWPortrait-Z.safetensors", adapter_name="lora")
pipe.set_adapters(["lora"], adapter_weights=[1.0])
pipe.fuse_lora(adapter_names=["lora"], lora_scale=0.9)
pipe.unload_lora_weights()
pipe.to("cuda")
pipe_no_lora.to("cuda")
# ======== AoTI compilation + FA3 ========
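# aoti_blocks_load swaps the repeated transformer blocks for ahead-of-time compiled
# versions (FlashAttention-3 variant) published under zerogpu-aoti/Z-Image.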
pipe.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
spaces.aoti_blocks_load(pipe.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
pipe_no_lora.transformer.layers._repeated_blocks = ["ZImageTransformerBlock"]
spaces.aoti_blocks_load(pipe_no_lora.transformer.layers, "zerogpu-aoti/Z-Image", variant="fa3")
MAX_SEED = np.iinfo(np.int32).max
print("Pipeline loaded!")
@spaces.GPU
def generate_image(prompt, height, width, num_inference_steps, seed=42, randomize_seed=True, progress=gr.Progress(track_tqdm=True)):
    """Generate images from the given prompt, with and without the fused LoRA."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
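    # Run the LoRA-fused pipeline and the baseline with the same seed so the fused LoRA is the only difference.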
    image = pipe(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # Guidance should be 0 for Turbo models
        generator=torch.Generator(device="cuda").manual_seed(seed),
    ).images[0]
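    # Baseline: the same prompt and seed through the untouched pipeline.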
    image_no_lora = pipe_no_lora(
        prompt=prompt,
        height=int(height),
        width=int(width),
        num_inference_steps=int(num_inference_steps),
        guidance_scale=0.0,  # Guidance should be 0 for Turbo models
        generator=torch.Generator(device="cuda").manual_seed(seed),
    ).images[0]
    # Return the pair for the ImageSlider (baseline first, LoRA result second) plus the seed used.
    return (image_no_lora, image), seed
# Example prompts
examples = [
    ["A dramatic close-up high-fashion portrait with avant-garde futuristic styling, metallic accents, sculptural makeup, glowing rim light, hyperreal detail, cool-toned color palette, glossy finish, fashion campaign quality."],
]
css = """
#col-container { max-width: 950px; margin: 0 auto; }
.dark .progress-text { color: white !important; }
#examples { max-width: 950px; margin: 0 auto; }
.dark #examples button,
.dark #examples .example,
.dark #examples span {
color: white !important;
}
"""
# Build the Gradio interface
with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(
            """
            # Z-Image-Turbo Portrait✨
            Generate high-quality portrait images with [Tongyi-MAI/Z-Image-Turbo](https://huggingface.co/Tongyi-MAI/Z-Image-Turbo) and the [portrait-beauty LoRA by @dynamicwangs and Shakker Labs](https://huggingface.co/Shakker-Labs/AWPortrait-Z) for fast inference with enhanced detail.
            This turbo model generates images in just 8 inference steps!
            """
        )
        with gr.Row():
            with gr.Column(scale=1):
                prompt = gr.Textbox(
                    label="Prompt",
                    placeholder="Enter your image description...",
                    max_lines=4,
                )
                generate_btn = gr.Button("Generate", variant="primary")
                with gr.Accordion("Advanced Settings", open=False):
                    with gr.Row():
                        height = gr.Slider(
                            minimum=512,
                            maximum=2048,
                            value=1024,
                            step=64,
                            label="Height",
                        )
                        width = gr.Slider(
                            minimum=512,
                            maximum=2048,
                            value=1024,
                            step=64,
                            label="Width",
                        )
                    with gr.Row():
                        num_inference_steps = gr.Slider(
                            minimum=1,
                            maximum=20,
                            value=9,
                            step=1,
                            label="Inference Steps",
                            info="9 steps result in 8 DiT forward passes",
                        )
                    with gr.Row():
                        seed = gr.Number(
                            label="Seed",
                            value=42,
                            precision=0,
                        )
                        randomize_seed = gr.Checkbox(
                            label="Randomize Seed",
                            value=True,
                        )
            with gr.Column(scale=1):
                output_image = gr.ImageSlider(
                    label="Output (left: without LoRA, right: with LoRA)",
                    type="pil",
                )
        gr.Examples(
            examples=examples,
            inputs=[prompt],
            cache_examples=False,
            elem_id="examples",
        )
    # Connect the generate button
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
        outputs=[output_image, seed],
    )
    # Also allow generating by pressing Enter in the prompt box
    prompt.submit(
        fn=generate_image,
        inputs=[prompt, height, width, num_inference_steps, seed, randomize_seed],
        outputs=[output_image, seed],
    )
if __name__ == "__main__":
    demo.launch(mcp_server=True)