John6666 committed
Commit 8490feb
1 Parent(s): f6871fd

Upload app.py

Files changed (1)
  1. app.py +95 -19
app.py CHANGED
@@ -5,34 +5,92 @@ from pathlib import Path
from all_models import models
from externalmod import gr_Interface_load
from prompt_extend import extend_prompt
+ from random import randint
+ import asyncio
+ from threading import RLock
+ lock = RLock()
+ HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.

- text_gen1 = extend_prompt
+ inference_timeout = 300
+ MAX_SEED = 2**32-1
current_model = models[0]
-
+ text_gen1 = extend_prompt
#text_gen1=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
#text_gen1=gr.Interface.load("spaces/Yntec/prompt-extend")
#text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend")
#text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")

- models2 = [gr_Interface_load(f"models/{m}",live=False, preprocess=True, postprocess=False) for m in models]
+ models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]

- def text_it1(inputs,text_gen1=text_gen1):
-     go_t1=text_gen1(inputs)
+ def text_it1(inputs, text_gen1=text_gen1):
+     go_t1 = text_gen1(inputs)
    return(go_t1)

def set_model(current_model):
    current_model = models[current_model]
    return gr.update(label=(f"{current_model}"))

- def send_it1(inputs, model_choice): #negative_prompt,
-     proc1=models2[model_choice]
-     output1=proc1(inputs)
+ def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed): #negative_prompt,
+     #proc1 = models2[model_choice]
+     #output1 = proc1(inputs)
+     output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
    #negative_prompt=negative_prompt
-     return(output1)
+     return (output1)

- css=""""""
+ # https://huggingface.co/docs/api-inference/detailed_parameters
+ # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
+ async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
+     from pathlib import Path
+     kwargs = {}
+     if height is not None and height >= 256: kwargs["height"] = height
+     if width is not None and width >= 256: kwargs["width"] = width
+     if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
+     if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
+     noise = ""
+     if seed >= 0: kwargs["seed"] = seed
+     else:
+         rand = randint(1, 500)
+         for i in range(rand):
+             noise += " "
+     task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
+                                prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
+     await asyncio.sleep(0)
+     try:
+         result = await asyncio.wait_for(task, timeout=timeout)
+     except (Exception, asyncio.TimeoutError) as e:
+         print(e)
+         print(f"Task timed out: {models2[model_index]}")
+         if not task.done(): task.cancel()
+         result = None
+     if task.done() and result is not None:
+         with lock:
+             png_path = "image.png"
+             result.save(png_path)
+             image = str(Path(png_path).resolve())
+         return image
+     return None

- with gr.Blocks(css=css) as myface:
+ def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
+     try:
+         loop = asyncio.new_event_loop()
+         result = loop.run_until_complete(infer(model_index, prompt, nprompt,
+                                                height, width, steps, cfg, seed, inference_timeout))
+     except (Exception, asyncio.CancelledError) as e:
+         print(e)
+         print(f"Task aborted: {models2[model_index]}")
+         result = None
+     finally:
+         loop.close()
+     return result
+
+ css="""
+ #container { max-width: 1200px; margin: 0 auto; !important; }
+ .output { width=112px; height=112px; !important; }
+ .gallery { width=100%; min_height=768px; !important; }
+ .guide { text-align: center; !important; }
+ """
+
+ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as myface:
    gr.HTML("""
    <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
    <div>
@@ -105,7 +163,17 @@ with gr.Blocks(css=css) as myface:
    model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
-             magic1 = gr.Textbox(label="Your Prompt", lines=4) #Positive
+             with gr.Group():
+                 magic1 = gr.Textbox(label="Your Prompt", lines=4) #Positive
+                 with gr.Accordion("Advanced", open=False, visible=True):
+                     neg_input = gr.Textbox(label='Negative prompt:', lines=1)
+                     with gr.Row():
+                         width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                         height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
+                     with gr.Row():
+                         steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
+                         cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
+                     seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
        #with gr.Column(scale=100):
            #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1)
    gr.HTML("""<style> .gr-button {
@@ -137,7 +205,8 @@ with gr.Blocks(css=css) as myface:
    run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column():
-             output1 = gr.Image(label=(f"{current_model}"))
+             output1 = gr.Image(label=(f"{current_model}"), show_download_button=True, elem_classes="output",
+                                interactive=False, show_share_button=False, format=".png")

    with gr.Row():
        with gr.Column(scale=50):
@@ -145,15 +214,22 @@ with gr.Blocks(css=css) as myface:
            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above")
            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above")
            def short_prompt(inputs):
-                 return(inputs)
+                 return (inputs)

    model_name1.change(set_model, inputs=model_name1, outputs=[output1])

-     run.click(send_it1, inputs=[magic1, model_name1], outputs=[output1])
+     #run.click(send_it1, inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed], outputs=[output1])

-     use_short.click(short_prompt,inputs=[input_text], outputs=magic1)
+     gr.on(
+         triggers=[run.click, magic1.submit],
+         fn=send_it1,
+         inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
+         outputs=[output1],
+     )
+
+     use_short.click(short_prompt, inputs=[input_text], outputs=magic1)

-     see_prompts.click(text_it1,inputs=[input_text], outputs=magic1)
+     see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)

- myface.queue()
- myface.launch(show_api=False)
+ myface.queue(default_concurrency_limit=200, max_size=200)
+ myface.launch(show_api=False, max_threads=400)
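
Note on the pattern introduced above: infer() runs the blocking model call in a worker thread via asyncio.to_thread and bounds it with asyncio.wait_for, and gen_fn() drives that coroutine on a fresh event loop so it can be called from Gradio's synchronous callback. Below is a minimal, self-contained sketch of the same idea; run_with_timeout, generate, fake_model, and the 5-second timeout are illustrative assumptions for this note, not part of the commit.

import asyncio
import time

async def run_with_timeout(fn, timeout, **kwargs):
    # Run a blocking callable in a worker thread; give up if it overruns the deadline.
    task = asyncio.create_task(asyncio.to_thread(fn, **kwargs))
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except (Exception, asyncio.TimeoutError) as e:
        print(e)
        if not task.done():
            task.cancel()
        return None

def fake_model(prompt):
    # Hypothetical stand-in for models2[model_index].fn in the commit above.
    time.sleep(1)
    return f"image for: {prompt}"

def generate(prompt, timeout=5):
    # Synchronous wrapper mirroring gen_fn(): new event loop per call, closed afterwards.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(run_with_timeout(fake_model, timeout, prompt=prompt))
    finally:
        loop.close()

print(generate("a cat"))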