# Noisescope — Hugging Face Space demo.
# NOTE(review): the hosted Space was showing "Runtime error" at startup;
# see the fixes around example caching below.
import time

import gradio as gr
import torch
from PIL import Image
from diffusers.image_processor import VaeImageProcessor
from diffusers.schedulers import (
    DDIMScheduler,
    DDPMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils.torch_utils import randn_tensor

# Display name -> Hugging Face Hub repo id. Only the repo's `scheduler`
# subfolder is fetched (no model weights are downloaded).
MODEL_IDS = {
    'Stable Diffusion v1.4': 'CompVis/stable-diffusion-v1-4',
    'Stable Diffusion v3 medium': 'stabilityai/stable-diffusion-3-medium-diffusers',
}

# Display name -> diffusers scheduler class selectable in the UI.
SCHEDULERS = {
    "DDPMScheduler": DDPMScheduler,
    "DDIMScheduler": DDIMScheduler,
    "PNDMScheduler": PNDMScheduler,
    "EulerDiscreteScheduler": EulerDiscreteScheduler,
    "EulerAncestralDiscreteScheduler": EulerAncestralDiscreteScheduler,
    "DPMSolverMultistepScheduler": DPMSolverMultistepScheduler,
}
def inference(
    image_pil: Image.Image,
    model_name: str,
    scheduler_name: str,
    per_step_time: float = 1,
    n_total_steps: int = 1000,
):
    """Stream the forward-diffusion noising of *image_pil*, one step at a time.

    Args:
        image_pil: Input image to noise.
        model_name: Key into ``MODEL_IDS``; selects which repo's scheduler
            config to load.
        scheduler_name: Key into ``SCHEDULERS``; selects the scheduler class.
        per_step_time: Seconds to sleep between steps (the UI offers fractional
            values such as 0.01, hence ``float``).
        n_total_steps: Number of noising steps to visualize.

    Yields:
        Tuples of ``(PIL.Image, str)`` — the noised image at the current step
        and an HTML snippet describing the step.

    Raises:
        gr.Error: If no input image was provided.
    """
    if image_pil is None:
        raise gr.Error("Please provide an input image.")

    # Show the clean input immediately while the scheduler config downloads.
    yield image_pil, ""

    scheduler = SCHEDULERS[scheduler_name].from_pretrained(MODEL_IDS[model_name], subfolder='scheduler')
    scheduler.set_timesteps(num_inference_steps=n_total_steps)
    # Schedulers order timesteps high -> low (the denoising direction); flip
    # them so the displayed noise level *increases* over the run.
    timesteps = torch.flip(scheduler.timesteps, dims=[0])

    image_processor = VaeImageProcessor()
    image_tensor = image_processor.preprocess(image_pil)

    # Fixed seed so the noise pattern is reproducible across runs.
    generator = torch.Generator().manual_seed(1117)
    noise = randn_tensor(image_tensor.shape, generator=generator)

    for i, t in enumerate(timesteps):
        # `add_noise` expects a 1-D timestep tensor; `reshape(1)` avoids the
        # copy-construction warning raised by `torch.tensor([t])`.
        noised_image_tensor = scheduler.add_noise(image_tensor, noise, timesteps=t.reshape(1))
        noised_image_pil = image_processor.postprocess(noised_image_tensor)[0]
        time.sleep(per_step_time)
        # language=HTML
        info_html = f"""
        <div class="info-step">
            <span class="step-number">Step {i + 1}</span> / {n_total_steps}
        </div>
        """
        yield noised_image_pil, info_html
if __name__ == '__main__':
    demo = gr.Interface(
        title='Noisescope',
        description='',
        fn=inference,
        inputs=[
            gr.Image(type='pil', label='Input Image'),
            gr.Dropdown(list(MODEL_IDS.keys()), value='Stable Diffusion v1.4', label='Model ID'),
            gr.Dropdown(list(SCHEDULERS.keys()), value='DDPMScheduler', label='Scheduler'),
            gr.Radio(choices=[0, 0.01, 0.1, 1], value=0, label='Per-Step time'),
            gr.Radio(choices=[10, 25, 50, 100, 1000], value=50, label='Total Steps'),
        ],
        outputs=[
            gr.Image(type='pil', label='Noised Image'),
            gr.HTML(label='Timestep Info'),
        ],
        # language=css
        css="""
        body { font-family: Arial, sans-serif; background-color: #f0f0f5; }
        h1 { color: #3c3c3c; }
        .gradio-container { max-width: 800px; margin: auto; padding: 20px; background: white; border-radius: 10px; box-shadow: 0px 0px 15px rgba(0, 0, 0, 0.1); }
        .info-step { padding: 10px; background: #3c3c3c; color: white; border-radius: 5px; margin-bottom: 10px; }
        .step-number { font-weight: bold; color: #FFD700; }
        """,
        # Caching would run `inference` (including the scheduler-config
        # download and the full step loop) at startup for every example — a
        # common cause of Space build/runtime failures. Run examples lazily.
        cache_examples=False,
        examples=[
            # Example rows take the *raw* component value; for gr.Image that
            # is a file path, not an already-opened PIL image (opening it at
            # import time would also crash if the asset is missing).
            ["assets/corgi.png", 'Stable Diffusion v1.4', 'DDIMScheduler', 0, 50],
        ],
    )
    demo.launch()