Commit 49ad6a5 committed by multimodalart
Parent: 86d5e88

Hopefully reduce overhead between users

Files changed (1):
  1. app.py +43 -11
app.py CHANGED
@@ -1,5 +1,6 @@
 import torch
 import gradio as gr
+from gradio import processing_utils, utils
 from PIL import Image
 import random
 from diffusers import (
@@ -95,7 +96,15 @@ def check_inputs(prompt: str, control_image: Image.Image):
         raise gr.Error("Please select or upload an Input Illusion")
     if prompt is None or prompt == "":
         raise gr.Error("Prompt is required")
-
+
+def convert_to_pil(base64_image):
+    pil_image = processing_utils.decode_base64_to_image(base64_image)
+    return pil_image
+
+def convert_to_base64(pil_image):
+    base64_image = processing_utils.encode_pil_to_base64(pil_image)
+    return base64_image
+
 # Inference function
 def inference(
     control_image: Image.Image,
@@ -156,10 +165,8 @@ def inference(
     end_time_struct = time.localtime(end_time)
     end_time_formatted = time.strftime("%H:%M:%S", end_time_struct)
     print(f"Inference ended at {end_time_formatted}, taking {end_time-start_time}s")
-    return out_image["images"][0], gr.update(visible=True), my_seed
+    return out_image["images"][0], gr.update(visible=True), gr.update(visible=True), my_seed
 
-    #return out
-
 with gr.Blocks(css=css) as app:
     gr.Markdown(
         '''
@@ -173,7 +180,8 @@ with gr.Blocks(css=css) as app:
    Given a prompt and your pattern, we use a QR code conditioned controlnet to create a stunning illusion! Credit to: [MrUgleh](https://twitter.com/MrUgleh) for discovering the workflow :)
    '''
    )
-
+    state_img_input = gr.State()
+    state_img_output = gr.State()
    with gr.Row():
        with gr.Column():
            control_image = gr.Image(label="Input Illusion", type="pil", elem_id="control_image")
@@ -198,14 +206,26 @@ with gr.Blocks(css=css) as app:
                share_button = gr.Button("Share to community", elem_id="share-btn")
 
    history = show_gallery_history()
-    prompt.submit(
+    prompt.click(
        check_inputs,
        inputs=[prompt, control_image],
        queue=False
+    ).success(
+        convert_to_pil,
+        inputs=[control_image],
+        outputs=[state_img_input],
+        queue=False,
+        preprocess=False,
    ).success(
        inference,
-        inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-        outputs=[result_image, share_group, used_seed]
+        inputs=[state_img_input, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
+        outputs=[state_img_output, result_image, share_group, used_seed]
+    ).success(
+        convert_to_base64,
+        inputs=[state_img_output],
+        outputs=[result_image],
+        queue=False,
+        postprocess=False
    ).success(
        fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
    )
@@ -213,10 +233,22 @@ with gr.Blocks(css=css) as app:
        check_inputs,
        inputs=[prompt, control_image],
        queue=False
+    ).success(
+        convert_to_pil,
+        inputs=[control_image],
+        outputs=[state_img_input],
+        queue=False,
+        preprocess=False,
    ).success(
        inference,
-        inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-        outputs=[result_image, share_group, used_seed]
+        inputs=[state_img_input, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
+        outputs=[state_img_output, result_image, share_group, used_seed]
+    ).success(
+        convert_to_base64,
+        inputs=[state_img_output],
+        outputs=[result_image],
+        queue=False,
+        postprocess=False
    ).success(
        fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
    )
@@ -224,4 +256,4 @@ with gr.Blocks(css=css) as app:
 app.queue(max_size=20)
 
 if __name__ == "__main__":
-    app.launch(max_threads=240)
+    app.launch(max_threads=400)
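
For reference, the pattern applied here, splitting one queued event into a chain where only the GPU-bound call holds a queue slot while base64/PIL conversions run with queue=False and Gradio's own preprocess/postprocess disabled, can be reduced to a small standalone sketch. This is only an illustration under assumptions: a Gradio 3.x environment where gradio.processing_utils.decode_base64_to_image and encode_pil_to_base64 exist (the helpers the diff imports), a dummy process_image standing in for the Space's real ControlNet inference, and illustrative component names that are not part of the Space's code.

import gradio as gr
from gradio import processing_utils
from PIL import Image, ImageOps


# Stand-in for the heavy, queued step; the real app runs ControlNet diffusion here.
def process_image(pil_image: Image.Image):
    out = ImageOps.invert(pil_image.convert("RGB"))
    # Return the PIL result for the output State plus a visibility update for the display component.
    return out, gr.update(visible=True)


# Cheap conversions that run outside the queue, so a slot is held only while inference runs.
def convert_to_pil(base64_image):
    return processing_utils.decode_base64_to_image(base64_image)


def convert_to_base64(pil_image):
    return processing_utils.encode_pil_to_base64(pil_image)


with gr.Blocks() as demo:
    state_img_input = gr.State()
    state_img_output = gr.State()

    input_image = gr.Image(label="Input", type="pil")
    run_btn = gr.Button("Run")
    result_image = gr.Image(label="Output", visible=False)

    run_btn.click(
        convert_to_pil,
        inputs=[input_image],
        outputs=[state_img_input],
        queue=False,
        preprocess=False,   # receive the raw base64 payload instead of letting Gradio decode it
    ).success(
        process_image,      # the only step that goes through the queue
        inputs=[state_img_input],
        outputs=[state_img_output, result_image],
    ).success(
        convert_to_base64,
        inputs=[state_img_output],
        outputs=[result_image],
        queue=False,
        postprocess=False,  # hand the base64 string straight to the frontend
    )

demo.queue(max_size=20)

if __name__ == "__main__":
    demo.launch()

With this split, a user's turn in the queue covers only the inference call itself, not image encoding or decoding, which appears to be the overhead the commit message targets.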