Spaces: Running on Zero
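Below is the complete app.py for this Space. It loads a diffusers pipeline (the model is SDXL-style, with two text encoders), uses Compel to turn prompts into embeddings, and wires generation into a gr.Interface. The @spaces.GPU decorator is what makes it "run on Zero": ZeroGPU attaches a GPU to the process only while a decorated function is executing.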
import gradio as gr
import spaces  # importing spaces before any CUDA use lets ZeroGPU manage the GPU
import torch
import os

from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline

# The model is configurable through the MODEL_NAME environment variable.
model_name = os.environ.get('MODEL_NAME', 'UnfilteredAI/NSFW-gen-v2')

pipe = DiffusionPipeline.from_pretrained(
    model_name,
    torch_dtype=torch.float16
)
pipe.to('cuda')
# Compel wraps both SDXL text encoders so that prompts produce the paired
# (sequence, pooled) embeddings the pipeline expects.
compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True]
)
# @spaces.GPU requests a ZeroGPU device for the duration of each call;
# without it the Space has no GPU attached when generate runs.
@spaces.GPU
def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, width, height, num_samples):
    embeds, pooled = compel(prompt)
    neg_embeds, neg_pooled = compel(negative_prompt)
    return pipe(
        prompt_embeds=embeds,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=neg_embeds,
        negative_pooled_prompt_embeds=neg_pooled,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        width=width,
        height=height,
        num_images_per_prompt=num_samples
    ).images
gr.Interface(
    fn=generate,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Text("", label="Negative Prompt"),
        # precision=0 makes gr.Number return ints, which diffusers
        # requires for step counts, image sizes, and batch size.
        gr.Number(7, precision=0, label="Number of inference steps"),
        gr.Number(3, label="Guidance scale"),
        gr.Number(512, precision=0, label="Width"),
        gr.Number(512, precision=0, label="Height"),
        gr.Number(1, precision=0, label="Number of images"),
    ],
    outputs=gr.Gallery(),
).launch()
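Compel is included for its prompt-weighting syntax: appending '+' to a token raises its influence and '-' lowers it, with repeated symbols strengthening the effect. A minimal sketch reusing the compel object defined above (the prompt text itself is just an illustration):

# '++' boosts the weighted token, '--' suppresses it.
embeds, pooled = compel('a portrait++ of an astronaut, grainy--')
images = pipe(prompt_embeds=embeds, pooled_prompt_embeds=pooled).images

On ZeroGPU the pipe(...) call must happen inside a @spaces.GPU-decorated function, as in generate above. If a run needs more than the default time slice, the decorator also accepts a duration in seconds, e.g. @spaces.GPU(duration=120).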