import os

import gradio as gr
import torch
import huggingface_hub as hf
from diffusers import StableDiffusionPipeline

# Log in with the Hugging Face access token (stored in the 'model_token' env var) before loading the model
hf.login(token=os.environ['model_token'])
def text_to_hair(prompt, guidance_scale=8, num_inference_steps=30, model_path="CVH-vn1210/hair-model"):
    # Load the base pipeline (repo id stored in the 'bmd' env var) and apply the fine-tuned attention (LoRA) weights
    pipe = StableDiffusionPipeline.from_pretrained(os.environ['bmd'], torch_dtype=torch.float16, use_auth_token=True)
    pipe.unet.load_attn_procs(model_path)
    pipe.to("cuda")
    image = pipe(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]
    # image.save(save_name)  # uncomment to also save the generated image to disk
    return image  # PIL image

demo = gr.Interface(
    fn=text_to_hair,
    inputs=[
        "text",
        gr.Slider(5, 20, value=8, label="Guidance_scale", info="Choose between 5 and 20; higher values follow the prompt more closely"),
        gr.Slider(20, 500, value=20, label="Num_infer_steps", info="Choose between 20 and 500; more steps improve image detail but take longer"),
    ],
    outputs="image",
)
demo.launch()
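
# --- Optional: load the pipeline once at startup instead of on every request ---
# A minimal sketch (assuming the same 'bmd' env var and LoRA weights as above): reloading
# StableDiffusionPipeline inside text_to_hair() re-reads the model weights on every call,
# which is slow. Something along these lines could replace the per-call loading:
#
# PIPE = StableDiffusionPipeline.from_pretrained(os.environ['bmd'], torch_dtype=torch.float16, use_auth_token=True)
# PIPE.unet.load_attn_procs("CVH-vn1210/hair-model")
# PIPE.to("cuda")
#
# def text_to_hair(prompt, guidance_scale=8, num_inference_steps=30):
#     return PIPE(prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale).images[0]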