import os

import torch
import gradio as gr
import huggingface_hub as hf
from diffusers import StableDiffusionPipeline

# Authenticate with the Hugging Face Hub before pulling model weights
# (the base model id is supplied via env and may be gated).
hf.login(token=os.environ["model_token"])

# Cache loaded pipelines keyed by model_path so the multi-GB model is NOT
# re-downloaded and re-moved to the GPU on every request — the original
# rebuilt the whole pipeline inside each call.
_PIPELINE_CACHE = {}


def _get_pipeline(model_path):
    """Return a CUDA-resident pipeline with the fine-tuned attention
    (LoRA) weights from *model_path*, loading it at most once per path."""
    if model_path not in _PIPELINE_CACHE:
        pipe = StableDiffusionPipeline.from_pretrained(
            os.environ["bmd4"],  # base model repo id kept out of source via env var
            torch_dtype=torch.float16,
            use_auth_token=True,
        )
        # Apply the fine-tuned attention processors on top of the base UNet.
        pipe.unet.load_attn_procs(model_path)
        pipe.to("cuda")
        _PIPELINE_CACHE[model_path] = pipe
    return _PIPELINE_CACHE[model_path]


def text_to_hair(prompt, guidance_scale=8, num_inference_steps=30,
                 model_path="CVH-vn1210/hair-model"):
    """Generate a hair-style image from a text *prompt*.

    Parameters
    ----------
    prompt : str
        Text description of the desired hairstyle.
    guidance_scale : float, optional
        Classifier-free guidance strength; higher values follow the
        prompt more closely.
    num_inference_steps : int, optional
        Number of denoising steps; more steps improve quality at the
        cost of latency.
    model_path : str, optional
        Hub repo containing the fine-tuned attention (LoRA) weights.

    Returns
    -------
    PIL.Image.Image
        The generated image (Gradio renders PIL images directly).
    """
    pipe = _get_pipeline(model_path)
    result = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    )
    return result.images[0]


demo = gr.Interface(
    fn=text_to_hair,
    inputs=[
        "text",
        gr.Slider(5, 20, value=8, label="Guidance_scale",
                  info="Choose between 5 and 20 to improve image's content"),
        gr.Slider(20, 500, value=20, label="Num_infer_steps",
                  info="Choose between 20 and 500 to improve image's resolution"),
    ],
    outputs="image",
)
demo.launch()