Commit dcab473 • patrickvonplaten
Parent(s): 2b0be71
improve
__pycache__/app.cpython-310.pyc
ADDED: Binary file (6.48 kB)

__pycache__/gallery_history.cpython-310.pyc
ADDED: Binary file (4.43 kB)

__pycache__/illusion_style.cpython-310.pyc
ADDED: Binary file (985 Bytes)

__pycache__/share_btn.cpython-310.pyc
ADDED: Binary file (6.95 kB)
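The four files above are CPython 3.10 bytecode caches that Python regenerates automatically at import time; they are normally excluded from version control rather than committed. A typical ignore rule (illustrative only, not part of this commit) would be:

# .gitignore entry (illustrative; not included in this commit)
__pycache__/
*.pyc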
app.py
CHANGED
@@ -1,5 +1,4 @@
 import torch
-import os
 import gradio as gr
 from PIL import Image
 import random
@@ -15,7 +14,7 @@ from diffusers import (
     EulerDiscreteScheduler # <-- Added import
 )
 from share_btn import community_icon_html, loading_icon_html, share_js
-from gallery_history import fetch_gallery_history, show_gallery_history
+# from gallery_history import fetch_gallery_history, show_gallery_history
 from illusion_style import css
 
 BASE_MODEL = "SG161222/Realistic_Vision_V5.1_noVAE"
@@ -112,6 +111,8 @@ def inference(
 
     # Rest of your existing code
     control_image_small = center_crop_resize(control_image)
+    control_image_large = center_crop_resize(control_image, (1024, 1024))
+
     main_pipe.scheduler = SAMPLER_MAP[sampler](main_pipe.scheduler.config)
     my_seed = random.randint(0, 2**32 - 1) if seed == -1 else seed
     generator = torch.Generator(device="cuda").manual_seed(seed)
@@ -128,7 +129,6 @@ def inference(
         num_inference_steps=15,
         output_type="latent"
     )
-    control_image_large = center_crop_resize(control_image, (1024, 1024))
     upscaled_latents = upscale(out, "nearest-exact", 2)
     out_image = image_pipe(
         prompt=prompt,
@@ -184,23 +184,25 @@ with gr.Blocks(css=css) as app:
                 loading_icon = gr.HTML(loading_icon_html)
                 share_button = gr.Button("Share to community", elem_id="share-btn")
 
-    history = show_gallery_history()
+    # history = show_gallery_history()
     prompt.submit(
         inference,
         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
         outputs=[result_image, share_group, used_seed]
-    ).then(
-        fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
     )
+    # ).then(
+    #     fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
+    # )
     run_btn.click(
         inference,
         inputs=[control_image, prompt, negative_prompt, guidance_scale, controlnet_conditioning_scale, control_start, control_end, strength, seed, sampler],
         outputs=[result_image, share_group, used_seed]
-    ).then(
-        fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
     )
+    # ).then(
+    #     fn=fetch_gallery_history, inputs=[prompt, result_image], outputs=history, queue=False
+    # )
     share_button.click(None, [], [], _js=share_js)
 app.queue(max_size=20)
 
 if __name__ == "__main__":
-    app.launch()
+    app.launch(share=True)
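For orientation, a minimal runnable sketch of the Gradio wiring this commit leaves in place: both the prompt box and the Run button call the inference function directly, with the gallery-history .then() chaining disabled and the app launched with share=True. The components below are simplified stand-ins (hypothetical, not the Space's real image and slider inputs):

# Minimal sketch (assumed, simplified components; the real app feeds image and
# slider inputs into a ControlNet/diffusers pipeline).
import random
import gradio as gr

def inference(prompt, seed):
    # Placeholder for the real diffusion call.
    used = random.randint(0, 2**32 - 1) if seed == -1 else int(seed)
    return f"(image generated for: {prompt})", used

with gr.Blocks() as app:
    prompt = gr.Textbox(label="Prompt")
    seed = gr.Number(value=-1, label="Seed (-1 for random)")
    run_btn = gr.Button("Run")
    result_image = gr.Textbox(label="Result")  # stand-in for gr.Image
    used_seed = gr.Number(label="Used seed")

    # Same trigger pattern as the Space: Enter in the prompt box or the Run
    # button both call the single inference function; no .then() follow-up.
    prompt.submit(inference, inputs=[prompt, seed], outputs=[result_image, used_seed])
    run_btn.click(inference, inputs=[prompt, seed], outputs=[result_image, used_seed])

app.queue(max_size=20)

if __name__ == "__main__":
    # share=True asks Gradio to create a temporary public link.
    app.launch(share=True)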
requirements.txt
CHANGED
@@ -1,9 +1,11 @@
 diffusers
 transformers
 accelerate
-torch
 xformers
 gradio
 Pillow
 qrcode
-filelock
+filelock
+
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch