import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
# import huggingface_hub as hf
# Remember to log in with a Hugging Face token (e.g. `huggingface-cli login`) before loading the model.


def text_to_hair(prompt, num_inference_steps=200, guidance_scale=9, model_path="./"):
    # NOTE: the pipeline is reloaded on every call; load it once at module level for faster inference.
    pipe = StableDiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2",
        torch_dtype=torch.float16,
        use_auth_token=True,
    )
    # Load the fine-tuned LoRA attention weights from model_path.
    pipe.unet.load_attn_procs(model_path)
    pipe.to("cuda")
    image = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    ).images[0]
    # image.save(save_name)  # uncomment (and define save_name) if you want to save the image
    return image  # PIL format


demo = gr.Interface(fn=text_to_hair, inputs="text", outputs="image")
demo.launch()