kadirnar committed on
Commit d15eeb1
1 Parent(s): c9aaca7

Update app.py

Files changed (1): app.py (+16 -11)
app.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
 from PIL import Image
 import spaces
 import torch
-from diffusers import StableDiffusion3Pipeline,StableDiffusion3Img2ImgPipeline, DPMSolverMultistepScheduler, AutoencoderKL
+from diffusers import StableDiffusion3Pipeline, DPMSolverMultistepScheduler, AutoencoderKL, StableDiffusion3Img2ImgPipeline
 from huggingface_hub import snapshot_download
 
 huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
@@ -33,8 +33,11 @@ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-pipe = StableDiffusion3Pipeline.from_pretrained(model_path, torch_dtype=torch.float16)
-img2img_pipe = StableDiffusion3Img2ImgPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
+def load_pipeline(pipeline_type):
+    if pipeline_type == "text2img":
+        return StableDiffusion3Pipeline.from_pretrained(model_path, torch_dtype=torch.float16)
+    elif pipeline_type == "img2img":
+        return StableDiffusion3Img2ImgPipeline.from_pretrained(model_path, torch_dtype=torch.float16)
 
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
@@ -63,6 +66,7 @@ def generate(
     use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
 ):
+    pipe = load_pipeline("text2img")
     pipe.to(device)
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
@@ -100,7 +104,8 @@ def img2img_generate(
     use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
 ):
-    img2img_pipe.to(device)
+    pipe = load_pipeline("img2img")
+    pipe.to(device)
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)
 
@@ -109,7 +114,7 @@ def img2img_generate(
 
     init_image = init_image.resize((768, 768))
 
-    output = img2img_pipe(
+    output = pipe(
         prompt=prompt,
         image=init_image,
         negative_prompt=negative_prompt,
@@ -177,7 +182,7 @@ with gr.Blocks(css=css) as demo:
             )
            seed = gr.Slider(
                 label="Seed",
-                min=0,
+                minimum=0,
                 maximum=MAX_SEED,
                 step=1,
                 value=0,
@@ -185,14 +190,14 @@ with gr.Blocks(css=css) as demo:
 
            steps = gr.Slider(
                 label="Steps",
-                min=0,
+                minimum=0,
                 maximum=60,
                 step=1,
                 value=25,
             )
            number_image = gr.Slider(
                 label="Number of Images",
-                min=1,
+                minimum=1,
                 maximum=4,
                 step=1,
                 value=1,
@@ -201,14 +206,14 @@ with gr.Blocks(css=css) as demo:
         with gr.Row(visible=True):
            width = gr.Slider(
                 label="Width",
-                min=256,
+                minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=32,
                 value=1024,
             )
            height = gr.Slider(
                 label="Height",
-                min=256,
+                minimum=256,
                 maximum=MAX_IMAGE_SIZE,
                 step=32,
                 value=1024,
@@ -216,7 +221,7 @@ with gr.Blocks(css=css) as demo:
         with gr.Row():
            guidance_scale = gr.Slider(
                 label="Guidance Scale",
-                min=0.1,
+                minimum=0.1,
                 maximum=10,
                 step=0.1,
                 value=7.0,
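
Note on the refactor: as committed, load_pipeline builds a brand-new pipeline object every time generate or img2img_generate runs, so the SD3 weights are re-read on each request. If that ever becomes a bottleneck, the helper could be wrapped in a small cache that keeps the lazy loading but reuses already-constructed pipelines. The sketch below is not part of this commit; it assumes the same model_path and pipeline classes used in app.py, and the names _PIPELINES and load_pipeline_cached are made up for illustration.

# Sketch only (not in this commit): cache the two SD3 pipelines so repeated
# Gradio requests reuse already-loaded weights instead of reloading them.
import torch
from diffusers import StableDiffusion3Pipeline, StableDiffusion3Img2ImgPipeline

_PIPELINES = {}  # maps "text2img" / "img2img" to a constructed pipeline

def load_pipeline_cached(pipeline_type, model_path):
    if pipeline_type not in _PIPELINES:
        if pipeline_type == "text2img":
            cls = StableDiffusion3Pipeline
        elif pipeline_type == "img2img":
            cls = StableDiffusion3Img2ImgPipeline
        else:
            raise ValueError(f"unknown pipeline_type: {pipeline_type!r}")
        _PIPELINES[pipeline_type] = cls.from_pretrained(model_path, torch_dtype=torch.float16)
    return _PIPELINES[pipeline_type]

The handlers would then call load_pipeline_cached("text2img", model_path) or load_pipeline_cached("img2img", model_path) in place of load_pipeline(...) and keep the existing pipe.to(device) call; whether the faster turnaround is worth keeping both pipelines resident in memory is a trade-off for the Space to weigh.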