Blane187 committed on
Commit
9d6905f
1 Parent(s): 8490feb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +235 -235
app.py CHANGED
@@ -1,235 +1,235 @@
1
- import gradio as gr
2
- import os
3
- import sys
4
- from pathlib import Path
5
- from all_models import models
6
- from externalmod import gr_Interface_load
7
- from prompt_extend import extend_prompt
8
- from random import randint
9
- import asyncio
10
- from threading import RLock
11
- lock = RLock()
12
- HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
13
-
14
- inference_timeout = 300
15
- MAX_SEED = 2**32-1
16
- current_model = models[0]
17
- text_gen1 = extend_prompt
18
- #text_gen1=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion")
19
- #text_gen1=gr.Interface.load("spaces/Yntec/prompt-extend")
20
- #text_gen1=gr.Interface.load("spaces/daspartho/prompt-extend")
21
- #text_gen1=gr.Interface.load("spaces/Omnibus/MagicPrompt-Stable-Diffusion_link")
22
-
23
- models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]
24
-
25
- def text_it1(inputs, text_gen1=text_gen1):
26
- go_t1 = text_gen1(inputs)
27
- return(go_t1)
28
-
29
- def set_model(current_model):
30
- current_model = models[current_model]
31
- return gr.update(label=(f"{current_model}"))
32
-
33
- def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed): #negative_prompt,
34
- #proc1 = models2[model_choice]
35
- #output1 = proc1(inputs)
36
- output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
37
- #negative_prompt=negative_prompt
38
- return (output1)
39
-
40
- # https://huggingface.co/docs/api-inference/detailed_parameters
41
- # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
42
- async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
43
- from pathlib import Path
44
- kwargs = {}
45
- if height is not None and height >= 256: kwargs["height"] = height
46
- if width is not None and width >= 256: kwargs["width"] = width
47
- if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
48
- if cfg is not None and cfg > 0: cfg = kwargs["guidance_scale"] = cfg
49
- noise = ""
50
- if seed >= 0: kwargs["seed"] = seed
51
- else:
52
- rand = randint(1, 500)
53
- for i in range(rand):
54
- noise += " "
55
- task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
56
- prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
57
- await asyncio.sleep(0)
58
- try:
59
- result = await asyncio.wait_for(task, timeout=timeout)
60
- except (Exception, asyncio.TimeoutError) as e:
61
- print(e)
62
- print(f"Task timed out: {models2[model_index]}")
63
- if not task.done(): task.cancel()
64
- result = None
65
- if task.done() and result is not None:
66
- with lock:
67
- png_path = "image.png"
68
- result.save(png_path)
69
- image = str(Path(png_path).resolve())
70
- return image
71
- return None
72
-
73
- def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
74
- try:
75
- loop = asyncio.new_event_loop()
76
- result = loop.run_until_complete(infer(model_index, prompt, nprompt,
77
- height, width, steps, cfg, seed, inference_timeout))
78
- except (Exception, asyncio.CancelledError) as e:
79
- print(e)
80
- print(f"Task aborted: {models2[model_index]}")
81
- result = None
82
- finally:
83
- loop.close()
84
- return result
85
-
86
- css="""
87
- #container { max-width: 1200px; margin: 0 auto; !important; }
88
- .output { width=112px; height=112px; !important; }
89
- .gallery { width=100%; min_height=768px; !important; }
90
- .guide { text-align: center; !important; }
91
- """
92
-
93
- with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as myface:
94
- gr.HTML("""
95
- <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
96
- <div>
97
- <style>
98
- h1 {
99
- font-size: 6em;
100
- color: #ffc99f;
101
- margin-top: 30px;
102
- margin-bottom: 30px;
103
- text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
104
- }
105
- h3 {
106
- color: #ffc99f; !important;
107
- }
108
- h4 {
109
- display: inline-block;
110
- color: #ffffff !important;
111
- }
112
- .wrapper img {
113
- font-size: 98% !important;
114
- white-space: nowrap !important;
115
- text-align: center !important;
116
- display: inline-block !important;
117
- color: #ffffff !important;
118
- }
119
- .wrapper {
120
- color: #ffffff !important;
121
- }
122
- .gradio-container {
123
- background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
124
- color: #ffaa66 !important;
125
- font-family: 'IBM Plex Sans', sans-serif !important;
126
- }
127
- .text-gray-500 {
128
- color: #ffc99f !important;
129
- }
130
- .gr-box {
131
- background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
132
- border-top-color: #000000 !important;
133
- border-right-color: #ffffff !important;
134
- border-bottom-color: #ffffff !important;
135
- border-left-color: #000000 !important;
136
- }
137
- .gr-input {
138
- color: #ffc99f; !important;
139
- background-color: #254150 !important;
140
- }
141
- :root {
142
- --neutral-100: #000000 !important;
143
- }
144
- </style>
145
- <body>
146
- <div class="center"><h1>Blitz Diffusion</h1>
147
- </div>
148
- </body>
149
- </div>
150
- <p style="margin-bottom: 1px; color: #ffaa66;">
151
- <h3>899 Stable Diffusion models, but why? For your enjoyment!</h3></p>
152
- <br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added dailty over there! 25 new models since last update!</div>
153
- <p style="margin-bottom: 1px; font-size: 98%">
154
- <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
155
- <p style="margin-bottom: 1px; color: #ffffff;">
156
- <br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
157
- </p></p>
158
- </div>
159
- """)
160
- with gr.Row():
161
- with gr.Column(scale=100):
162
- #Model selection dropdown
163
- model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
164
- with gr.Row():
165
- with gr.Column(scale=100):
166
- with gr.Group():
167
- magic1 = gr.Textbox(label="Your Prompt", lines=4) #Positive
168
- with gr.Accordion("Advanced", open=False, visible=True):
169
- neg_input = gr.Textbox(label='Negative prompt:', lines=1)
170
- with gr.Row():
171
- width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
172
- height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
173
- with gr.Row():
174
- steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
175
- cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
176
- seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
177
- #with gr.Column(scale=100):
178
- #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1)
179
- gr.HTML("""<style> .gr-button {
180
- color: #ffffff !important;
181
- text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
182
- background-image: linear-gradient(#76635a, #d2a489) !important;
183
- border-radius: 24px !important;
184
- border: solid 1px !important;
185
- border-top-color: #ffc99f !important;
186
- border-right-color: #000000 !important;
187
- border-bottom-color: #000000 !important;
188
- border-left-color: #ffc99f !important;
189
- padding: 6px 30px;
190
- }
191
- .gr-button:active {
192
- color: #ffc99f !important;
193
- font-size: 98% !important;
194
- text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
195
- background-image: linear-gradient(#d2a489, #76635a) !important;
196
- border-top-color: #000000 !important;
197
- border-right-color: #ffffff !important;
198
- border-bottom-color: #ffffff !important;
199
- border-left-color: #000000 !important;
200
- }
201
- .gr-button:hover {
202
- filter: brightness(130%);
203
- }
204
- </style>""")
205
- run = gr.Button("Generate Image")
206
- with gr.Row():
207
- with gr.Column():
208
- output1 = gr.Image(label=(f"{current_model}"), show_download_button=True, elem_classes="output",
209
- interactive=False, show_share_button=False, format=".png")
210
-
211
- with gr.Row():
212
- with gr.Column(scale=50):
213
- input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2)
214
- see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above")
215
- use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above")
216
- def short_prompt(inputs):
217
- return (inputs)
218
-
219
- model_name1.change(set_model, inputs=model_name1, outputs=[output1])
220
-
221
- #run.click(send_it1, inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed], outputs=[output1])
222
-
223
- gr.on(
224
- triggers=[run.click, magic1.submit],
225
- fn=send_it1,
226
- inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
227
- outputs=[output1],
228
- )
229
-
230
- use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
231
-
232
- see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
233
-
234
- myface.queue(default_concurrency_limit=200, max_size=200)
235
- myface.launch(show_api=False, max_threads=400)
 
1
+ import gradio as gr
2
+ import os
3
+ import sys
4
+ from pathlib import Path
5
+ from all_models import models
6
+ from externalmod import gr_Interface_load
7
+ from prompt_extend import extend_prompt
8
+ from random import randint
9
+ import asyncio
10
+ from threading import RLock
11
# Serializes writes to the shared "image.png" output path.
lock = RLock()
# HF token for private/gated models; `or None` preserves the original
# behavior of mapping an unset OR empty HF_TOKEN env var to None.
HF_TOKEN = os.environ.get("HF_TOKEN") or None

inference_timeout = 300  # seconds to wait for one image before cancelling
MAX_SEED = 2**32 - 1
current_model = models[0]
# Prompt-extension callable (local implementation, no external Space dependency).
text_gen1 = extend_prompt

# One lazily-loaded Interface per model id; hf_token is only needed for
# private or gated models.
models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True, postprocess=False, hf_token=HF_TOKEN) for m in models]
24
+
25
def text_it1(inputs, text_gen1=text_gen1):
    """Run the configured prompt extender over *inputs* and return its output."""
    extended = text_gen1(inputs)
    return extended
28
+
29
def set_model(current_model):
    """Map a dropdown index to its model name and relabel the output image.

    Receives the Dropdown's integer index (type="index") and returns a
    Gradio update that sets the output component's label to the model name.
    """
    chosen = models[current_model]
    return gr.update(label=f"{chosen}")
32
+
33
def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed): #negative_prompt,
    """UI callback: forward all generation inputs to gen_fn.

    Returns whatever gen_fn produces (a PNG file path, or None on failure).
    """
    generated = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
    return generated
39
+
40
+ # https://huggingface.co/docs/api-inference/detailed_parameters
41
+ # https://huggingface.co/docs/huggingface_hub/package_reference/inference_client
42
async def infer(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    """Generate one image with models2[model_index] in a worker thread.

    Parameters sent as 0/None by the UI are omitted so the model's own
    defaults apply.  Returns the absolute path of the saved PNG on
    success, or None on failure or timeout.
    """
    kwargs = {}
    # Only forward parameters the caller actually set.
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    noise = ""
    if seed >= 0:
        kwargs["seed"] = seed
    else:
        # Random-length whitespace suffix defeats prompt-level result caching
        # so an unseeded request yields a fresh image each call.
        noise = " " * randint(1, 500)
    task = asyncio.create_task(asyncio.to_thread(models2[model_index].fn,
                               prompt=f'{prompt} {noise}', negative_prompt=nprompt, **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)  # yield once so the task gets scheduled
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except Exception as e:  # asyncio.TimeoutError is an Exception subclass, so one clause covers both
        print(e)
        print(f"Task timed out: {models2[model_index]}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        # All calls write the same "image.png"; the lock serializes saves.
        with lock:
            png_path = "image.png"
            result.save(png_path)
            image = str(Path(png_path).resolve())
        return image
    return None
72
+
73
def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    """Synchronous wrapper around infer() for Gradio callbacks.

    Runs infer() to completion on a fresh event loop and returns its
    result (PNG path or None).  Returns None on any error, including
    task cancellation.
    """
    # Create the loop BEFORE the try so `finally` can never hit an
    # unbound `loop` (the original could raise UnboundLocalError there).
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_index, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    # CancelledError is a BaseException (not Exception), so it must be listed explicitly.
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {models2[model_index]}")
        result = None
    finally:
        loop.close()
    return result
85
+
86
# Page CSS.  Fixed from the original, which used `=` instead of `:`
# (width=112px, min_height=768px) and placed `!important` after the
# closing semicolon — both invalid CSS that browsers silently drop.
# NOTE(review): this string is defined but no longer passed to gr.Blocks
# in this revision — confirm whether dropping `css=css` was intentional.
css = """
#container { max-width: 1200px; margin: 0 auto !important; }
.output { width: 112px !important; height: 112px !important; }
.gallery { width: 100% !important; min-height: 768px !important; }
.guide { text-align: center !important; }
"""
92
+
93
# ---------------------------------------------------------------------------
# Gradio UI definition.
# NOTE(review): the previous revision passed css=css to gr.Blocks; this one
# does not, leaving the module-level `css` string unused — confirm intentional.
# ---------------------------------------------------------------------------
with gr.Blocks(theme='Hev832/Applio', fill_width=True) as myface:
    # Static page header: inline <style> block plus banner text and links.
    gr.HTML("""
        <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
        <div>
        <style>
        h1 {
        font-size: 6em;
        color: #ffc99f;
        margin-top: 30px;
        margin-bottom: 30px;
        text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
        }
        h3 {
        color: #ffc99f; !important;
        }
        h4 {
        display: inline-block;
        color: #ffffff !important;
        }
        .wrapper img {
        font-size: 98% !important;
        white-space: nowrap !important;
        text-align: center !important;
        display: inline-block !important;
        color: #ffffff !important;
        }
        .wrapper {
        color: #ffffff !important;
        }
        .gradio-container {
        background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
        color: #ffaa66 !important;
        font-family: 'IBM Plex Sans', sans-serif !important;
        }
        .text-gray-500 {
        color: #ffc99f !important;
        }
        .gr-box {
        background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
        border-top-color: #000000 !important;
        border-right-color: #ffffff !important;
        border-bottom-color: #ffffff !important;
        border-left-color: #000000 !important;
        }
        .gr-input {
        color: #ffc99f; !important;
        background-color: #254150 !important;
        }
        :root {
        --neutral-100: #000000 !important;
        }
        </style>
        <body>
        <div class="center"><h1>Blitz Diffusion</h1>
        </div>
        </body>
        </div>
        <p style="margin-bottom: 1px; color: #ffaa66;">
        <h3>899 Stable Diffusion models, but why? For your enjoyment!</h3></p>
        <br><div class="wrapper">9.3 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">This has become a legacy backup copy of old <u><a href="https://huggingface.co/spaces/Yntec/ToyWorld">ToyWorld</a></u>'s UI! Newer models added dailty over there! 25 new models since last update!</div>
        <p style="margin-bottom: 1px; font-size: 98%">
        <br><h4>If a model is already loaded each new image takes less than <b>10</b> seconds to generate!</h4></p>
        <p style="margin-bottom: 1px; color: #ffffff;">
        <br><div class="wrapper">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 different models at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Huggingface Diffusion!</a></u>!
        </p></p>
        </div>
        """)
    with gr.Row():
        with gr.Column(scale=100):
            #Model selection dropdown
            # type="index" means callbacks receive the integer index, not the name.
            model_name1 = gr.Dropdown(label="Select Model", choices=[m for m in models], type="index", value=current_model, interactive=True)
    with gr.Row():
        with gr.Column(scale=100):
            with gr.Group():
                magic1 = gr.Textbox(label="Your Prompt", lines=4) #Positive
                # Advanced generation parameters; 0 means "use the model default"
                # (filtered out downstream in infer()).
                with gr.Accordion("Advanced", open=False, visible=True):
                    neg_input = gr.Textbox(label='Negative prompt:', lines=1)
                    with gr.Row():
                        width = gr.Number(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                        height = gr.Number(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0)
                    with gr.Row():
                        steps = gr.Number(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0)
                        cfg = gr.Number(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
            #with gr.Column(scale=100):
            #negative_prompt=gr.Textbox(label="Negative Prompt", lines=1)
            # Button styling, injected as a raw <style> block.
            gr.HTML("""<style> .gr-button {
            color: #ffffff !important;
            text-shadow: 1px 1px 0 rgba(0, 0, 0, 1) !important;
            background-image: linear-gradient(#76635a, #d2a489) !important;
            border-radius: 24px !important;
            border: solid 1px !important;
            border-top-color: #ffc99f !important;
            border-right-color: #000000 !important;
            border-bottom-color: #000000 !important;
            border-left-color: #ffc99f !important;
            padding: 6px 30px;
            }
            .gr-button:active {
            color: #ffc99f !important;
            font-size: 98% !important;
            text-shadow: 0px 0px 0 rgba(0, 0, 0, 1) !important;
            background-image: linear-gradient(#d2a489, #76635a) !important;
            border-top-color: #000000 !important;
            border-right-color: #ffffff !important;
            border-bottom-color: #ffffff !important;
            border-left-color: #000000 !important;
            }
            .gr-button:hover {
            filter: brightness(130%);
            }
            </style>""")
            run = gr.Button("Generate Image")
    with gr.Row():
        with gr.Column():
            # NOTE(review): gr.Image's `format` argument is usually given
            # without a leading dot ("png") — confirm ".png" is accepted by
            # the gradio version in use.
            output1 = gr.Image(label=(f"{current_model}"), show_download_button=True, elem_classes="output",
                               interactive=False, show_share_button=False, format=".png")

    with gr.Row():
        with gr.Column(scale=50):
            # Prompt-extension helper controls.
            input_text=gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2)
            see_prompts=gr.Button("Extend Idea -> overwrite the contents of the `Your Prompt´ box above")
            use_short=gr.Button("Copy the contents of this box to the `Your Prompt´ box above")

    def short_prompt(inputs):
        # Identity passthrough: copies the helper box into the prompt box unchanged.
        return (inputs)

    # Relabel the output image whenever a different model is selected.
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])

    #run.click(send_it1, inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed], outputs=[output1])

    # Generate on either the button click or Enter in the prompt box.
    gr.on(
        triggers=[run.click, magic1.submit],
        fn=send_it1,
        inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
        outputs=[output1],
    )

    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)

    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)

myface.queue(default_concurrency_limit=200, max_size=200)
myface.launch(show_api=False, max_threads=400)