multimodalart (HF staff) committed
Commit 1a833ba • 1 Parent(s): 4984c7e

Fix random seed and add a peek at the last used seed

Files changed (1):
  1. app.py +15 -6
app.py CHANGED
@@ -2,6 +2,7 @@ import torch
 import os
 import gradio as gr
 from PIL import Image
+import random
 from diffusers import (
     DiffusionPipeline,
     AutoencoderKL,
@@ -110,8 +111,9 @@ def inference(
     # Rest of your existing code
     control_image_small = center_crop_resize(control_image)
     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
-    generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
-
+    my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
+    generator = torch.manual_seed(my_seed)
+
     out = main_pipe(
         prompt=prompt,
         negative_prompt=negative_prompt,
@@ -139,7 +141,7 @@ def inference(
         control_guidance_end=float(control_guidance_end),
         controlnet_conditioning_scale=float(controlnet_conditioning_scale)
     )
-    return out_image["images"][0], gr.update(visible=True)
+    return out_image["images"][0], gr.update(visible=True), my_seed

     #return out

@@ -170,7 +172,8 @@ with gr.Blocks(css=css) as app:
                 control_start = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0, label="Start of ControlNet")
                 control_end = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="End of ControlNet")
                 strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=1, label="Strength of the upscaler")
-                seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed", randomize=True)
+                seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1, label="Seed", info="-1 means random seed")
+                used_seed = gr.Number(label="Last seed used",interactive=False)
             run_btn = gr.Button("Run")
         with gr.Column():
             result_image = gr.Image(label="Illusion Diffusion Output", interactive=False, elem_id="output")
@@ -180,11 +183,17 @@ with gr.Blocks(css=css) as app:
                 share_button = gr.Button("Share to community", elem_id="share-btn")

     history = show_gallery_history()
-
+    prompt.submit(
+        inference,
+        inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
+        outputs=[result_image, share_group, used_seed]
+    ).then(
+        fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
+    )
     run_btn.click(
         inference,
         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
-        outputs=[result_image, share_group]
+        outputs=[result_image, share_group, used_seed]
     ).then(
         fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
     )
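The seed handling introduced above reduces to a small pattern: treat -1 as "pick a random seed for me", seed a torch generator with the resolved value, and return that value so the UI can report it. Below is a minimal, self-contained sketch of the same pattern; resolve_seed and run_fake_pipeline are hypothetical stand-ins for illustration, not functions in app.py.

import random
import torch

def resolve_seed(seed: int) -> int:
    # Mirror the app's convention: -1 means "pick a random seed for me".
    return random.randint(0, 2**32 - 1) if seed == -1 else seed

def run_fake_pipeline(prompt: str, seed: int = -1):
    # Stand-in for the real diffusers call; only the seed plumbing matters here.
    my_seed = resolve_seed(seed)
    generator = torch.manual_seed(my_seed)        # seeds and returns the default CPU generator
    noise = torch.randn(4, generator=generator)   # generator-driven sampling is now reproducible
    return noise, my_seed                         # hand the seed back so the UI can show "Last seed used"

out_a, used_a = run_fake_pipeline("an illusion", seed=-1)       # random, but the seed is reported
out_b, used_b = run_fake_pipeline("an illusion", seed=used_a)   # re-run with the reported seed
assert torch.equal(out_a, out_b)                                # same seed, same output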
 
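On the Gradio side, the commit wires the same inference call to both the prompt's submit event and the Run button, and adds a non-interactive gr.Number that receives the resolved seed. A stripped-down sketch of that wiring follows; it is not the Space's actual app.py, generate() is a dummy stand-in for inference(), and the .then(fetch_gallery_history, ...) chaining is omitted.

import random
import gradio as gr

def generate(prompt, seed):
    # Stand-in for inference(): resolve the seed and echo it back alongside the "result".
    my_seed = random.randint(0, 2**32 - 1) if seed == -1 else int(seed)
    result = f"would generate '{prompt}' with seed {my_seed}"
    return result, my_seed

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    seed = gr.Slider(minimum=-1, maximum=9999999999, step=1, value=-1,
                     label="Seed", info="-1 means random seed")
    used_seed = gr.Number(label="Last seed used", interactive=False)
    result = gr.Textbox(label="Result", interactive=False)
    run_btn = gr.Button("Run")

    # Same event wiring as the commit: Enter in the prompt box and the Run button
    # both call the same function, and both write the resolved seed back to the UI.
    prompt.submit(generate, inputs=[prompt, seed], outputs=[result, used_seed])
    run_btn.click(generate, inputs=[prompt, seed], outputs=[result, used_seed])

if __name__ == "__main__":
    demo.launch()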