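# Blitz Diffusion: a Gradio app that generates images from text prompts using
# any of several hundred Stable Diffusion models served through the Hugging
# Face Inference API.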
import gradio as gr
import os
from pathlib import Path
from all_models import models
from externalmod import gr_Interface_load
from prompt_extend import extend_prompt
from random import randint
import asyncio
from threading import RLock

lock = RLock()
HF_TOKEN = os.environ.get("HF_TOKEN")  # Only needed for private or gated models.
inference_timeout = 300
MAX_SEED = 2**32 - 1
current_model = models[0]
text_gen1 = extend_prompt
# Alternative prompt extenders:
#text_gen1 = gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
#text_gen1 = gr.Interface.load("spaces/Yntec/prompt-extend")
#text_gen1 = gr.Interface.load("spaces/daspartho/prompt-extend")
#text_gen1 = gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")

models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]
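# Each entry wraps the `models/<name>` Inference API endpoint (see
# externalmod.py); infer() below calls models2[i].fn directly and saves the
# returned image itself, hence postprocess=False.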
def text_it1(inputs, text_gen1=text_gen1):
    # Expand a short idea into a full prompt with the prompt extender.
    return text_gen1(inputs)

def set_model(model_index):
    # Relabel the output image with the newly selected model's name.
    return gr.update(label=models[model_index])
def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
    # Generate an image with the selected model and return its file path.
    return gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
# https://huggingface.co/docs/api-inference/detailed_parameters
# https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    # Only forward parameters the user actually set; zeroed inputs are omitted
    # so the model's own defaults apply.
    kwargs = {}
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    noise = ""
    if seed >= 0:
        kwargs["seed"] = seed
    else:
        # Pad the prompt with a random amount of whitespace so repeated
        # unseeded requests are not served from the inference cache.
        noise = " " * randint(1, 500)
    # Run the blocking model call in a worker thread so the event loop stays free.
    task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        print(f"Task timed out: {models2[model_index]}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        # The PNG path is shared between requests, so serialize writes.
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    # Synchronous wrapper around infer(): Gradio calls this from a worker
    # thread with no running event loop, so create a fresh one per call.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_index, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {models2[model_index]}")
        result = None
    finally:
        loop.close()
    return result
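# Example (hypothetical) direct use outside the UI, with the first model in
# all_models.models:
#   path = gen_fn(0, "a watercolor fox", seed=42)
#   if path: print("Saved to", path)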
css="""
#container { max-width: 1200px; margin: 0 auto; !important; }
.output { width=112px; height=112px; !important; }
.gallery { width=100%; min_height=768px; !important; }
.guide { text-align: center; !important; }
"""
with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as myface:
    gr.HTML("""
<div style="text-align: center; max-width: 1200px; margin: 0 auto;">
<div>
<style>
h1 {
font-size: 6em;
color: #ffc99f;
margin-top: 30px;
margin-bottom: 30px;
text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
}
h3 {
color: #ffc99f !important;
}
h4 {
display: inline-block;
color: #ffffff !important;
}
.wrapper img {
font-size: 98% !important;
white-space: nowrap !important;
text-align: center !important;
display: inline-block !important;
color: #ffffff !important;
}
.wrapper {
color: #ffffff !important;
}
.gradio-container {
background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
color: #ffaa66 !important;
font-family: 'IBM Plex Sans', sans-serif !important;
}
.text-gray-500 {
color: #ffc99f !important;
}
.gr-box {
background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
border-top-color: #000000 !important;
border-right-color: #ffffff !important;
border-bottom-color: #ffffff !important;
border-left-color: #000000 !important;
}
.gr-input {
color: #ffc99f !important;
background-color: #254150 !important;
}
:root {
--neutral-100: #000000 !important;
}
</style>
<div class="center"><h1>Blitz Diffusion</h1></div>
</div>
<p style="margin-bottom: 1px; color: #ffaa66;">
<h3>899 Stable Diffusion models, but why? For your enjoyment!</h3></p>
<br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;"> This has become a legacy backup copy of the old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u> UI! Newer models are added daily over there! 25 new models since the last update!</div>
<p style="margin-bottom: 1px; font-size: 98%">
<br><h4>If a model is already loaded, each new image takes less than <b>10</b> seconds to generate!</h4></p>
<p style="margin-bottom: 1px; color: #ffffff;">
<br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion</a></u>!</div>
</p>
</div>
""")
    with gr.Row():
        with gr.Column(scale=100):
            # Model selection dropdown
            model_name1 = gr.Dropdown(label="Select Model", choices=list(models), type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            with gr.Group():
                magic1 = gr.Textbox(label="Your Prompt", lines=4)  # Positive prompt
                with gr.Accordion("Advanced", open=False, visible=True):
                    neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                    with gr.Row():
                        width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
gr.HTML("""<style> .gr-button {
color: #ffffff !important;
text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
background-image: linear-gradient(#76635a, #d2a489) !important;
border-radius: 24px !important;
border: solid 1px !important;
border-top-color: #ffc99f !important;
border-right-color: #000000 !important;
border-bottom-color: #000000 !important;
border-left-color: #ffc99f !important;
padding: 6px 30px;
}
.gr-button:active {
color: #ffc99f !important;
font-size: 98% !important;
text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
background-image: linear-gradient(#d2a489, #76635a) !important;
border-top-color: #000000 !important;
border-right-color: #ffffff !important;
border-bottom-color: #ffffff !important;
border-left-color: #000000 !important;
}
.gr-button:hover {
filter: brightness(130%);
}
</style>""")
run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column():
            output1 = gr.Image(label=current_model, show_download_button=True, elem_classes="output",
                               interactive=False, show_share_button=False, format="png")
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2)
            see_prompts = gr.Button("Extend Idea -> overwrite the contents of the 'Your Prompt' box above")
            use_short = gr.Button("Copy the contents of this box to the 'Your Prompt' box above")
    def short_prompt(inputs):
        # Pass text through unchanged so it can be copied into the prompt box.
        return inputs
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    gr.on(
        triggers=[run.click, magic1.submit],
        fn=send_it1,
        inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
        outputs=[output1],
    )
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)

myface.queue(default_concurrency_limit=200, max_size=200)
myface.launch(show_api=False, max_threads=400)