Afrinetwork7 committed
Commit 0a30c9c
1 Parent(s): 5c4c947

Update app.py

Files changed (1)
  1. app.py +56 -32
app.py CHANGED
@@ -3,32 +3,68 @@ import numpy as np
 import random
 import spaces
 import torch
-from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
-from transformers import CLIPTextModel, CLIPTokenizer,T5EncoderModel, T5TokenizerFast
+from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
+from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+import boto3
+import os
+from io import BytesIO
+import time
+
+# S3 Configuration
+S3_BUCKET = "afri"
+S3_REGION = "eu-west-3"
+S3_ACCESS_KEY_ID = "AKIAQQABC7IQWFLKSE62"
+S3_SECRET_ACCESS_KEY = "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"
+
+# Set up S3 client
+s3_client = boto3.client('s3',
+                         region_name=S3_REGION,
+                         aws_access_key_id=S3_ACCESS_KEY_ID,
+                         aws_secret_access_key=S3_SECRET_ACCESS_KEY)
 
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
 pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(device)
-
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 2048
 
+def save_image_to_s3(image):
+    # Convert PIL Image to bytes
+    img_byte_arr = BytesIO()
+    image.save(img_byte_arr, format='PNG')
+    img_byte_arr = img_byte_arr.getvalue()
+
+    # Generate a unique filename
+    filename = f"generated_image_{int(time.time())}.png"
+
+    # Upload to S3
+    s3_client.put_object(Bucket=S3_BUCKET, Key=filename, Body=img_byte_arr)
+
+    # Generate a pre-signed URL (valid for 1 hour)
+    url = s3_client.generate_presigned_url('get_object',
+                                           Params={'Bucket': S3_BUCKET,
+                                                   'Key': filename},
+                                           ExpiresIn=3600)
+    return url
 
 def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
     image = pipe(
-        prompt = prompt,
-        width = width,
-        height = height,
-        num_inference_steps = num_inference_steps,
-        generator = generator,
+        prompt=prompt,
+        width=width,
+        height=height,
+        num_inference_steps=num_inference_steps,
+        generator=generator,
         guidance_scale=guidance_scale
-    ).images[0]
-    return image, seed
-
+    ).images[0]
+
+    # Save image to S3 and get URL
+    image_url = save_image_to_s3(image)
+
+    return image_url, seed
+
 examples = [
     "a tiny astronaut hatching from an egg on the moon",
     "a cat holding a sign that says hello world",
@@ -43,7 +79,6 @@ css="""
 """
 
 with gr.Blocks(css=css) as demo:
-
     with gr.Column(elem_id="col-container"):
         gr.Markdown(f"""# FLUX.1 [dev]
 12B param rectified flow transformer guidance-distilled from [FLUX.1 [pro]](https://blackforestlabs.ai/)
@@ -51,7 +86,6 @@ with gr.Blocks(css=css) as demo:
         """)
 
         with gr.Row():
-
            prompt = gr.Text(
                 label="Prompt",
                 show_label=False,
@@ -59,13 +93,11 @@ with gr.Blocks(css=css) as demo:
                 placeholder="Enter your prompt",
                 container=False,
             )
-
            run_button = gr.Button("Run", scale=0)
 
-        result = gr.Image(label="Result", show_label=False)
+        result = gr.Text(label="Image URL", show_label=True)
 
         with gr.Accordion("Advanced Settings", open=False):
-
            seed = gr.Slider(
                 label="Seed",
                 minimum=0,
@@ -73,11 +105,8 @@ with gr.Blocks(css=css) as demo:
                 step=1,
                 value=0,
             )
-
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
            with gr.Row():
-
                width = gr.Slider(
                     label="Width",
                     minimum=256,
@@ -85,7 +114,6 @@ with gr.Blocks(css=css) as demo:
                     step=32,
                     value=1024,
                 )
-
                height = gr.Slider(
                     label="Height",
                     minimum=256,
@@ -93,9 +121,7 @@ with gr.Blocks(css=css) as demo:
                     step=32,
                     value=1024,
                 )
-
            with gr.Row():
-
                guidance_scale = gr.Slider(
                     label="Guidance Scale",
                     minimum=1,
@@ -103,7 +129,6 @@ with gr.Blocks(css=css) as demo:
                     step=0.1,
                     value=3.5,
                 )
-
                num_inference_steps = gr.Slider(
                     label="Number of inference steps",
                     minimum=1,
@@ -113,18 +138,17 @@ with gr.Blocks(css=css) as demo:
                 )
 
         gr.Examples(
-            examples = examples,
-            fn = infer,
-            inputs = [prompt],
-            outputs = [result, seed],
+            examples=examples,
+            fn=infer,
+            inputs=[prompt],
+            outputs=[result, seed],
             cache_examples="lazy"
         )
-
    gr.on(
         triggers=[run_button.click, prompt.submit],
-        fn = infer,
-        inputs = [prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
-        outputs = [result, seed]
+        fn=infer,
+        inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
+        outputs=[result, seed]
     )
 
 demo.launch()
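
For reference, below is a minimal standalone sketch of the S3 upload and pre-signed-URL flow this commit introduces. It is not the committed code: the environment variable names (S3_BUCKET, S3_REGION, S3_ACCESS_KEY_ID, S3_SECRET_ACCESS_KEY) and the stand-in test image are assumptions for illustration, whereas the commit itself hard-codes the bucket, region, and keys at module level in app.py.

# Minimal sketch of the upload-and-presign flow added in this commit.
# Assumptions (not part of the committed code): credentials, region, and
# bucket come from environment variables, and a blank PIL image stands in
# for the FLUX pipeline output.
import os
import time
from io import BytesIO

import boto3
from PIL import Image

s3_client = boto3.client(
    "s3",
    region_name=os.environ.get("S3_REGION", "eu-west-3"),
    aws_access_key_id=os.environ["S3_ACCESS_KEY_ID"],        # hypothetical env var names
    aws_secret_access_key=os.environ["S3_SECRET_ACCESS_KEY"],
)

def save_image_to_s3(image, bucket):
    # Serialize the PIL image to PNG bytes in memory.
    buf = BytesIO()
    image.save(buf, format="PNG")

    # Timestamp-based object key, mirroring the committed helper.
    key = f"generated_image_{int(time.time())}.png"

    # Upload the bytes and return a pre-signed GET URL valid for one hour.
    s3_client.put_object(Bucket=bucket, Key=key, Body=buf.getvalue())
    return s3_client.generate_presigned_url(
        "get_object",
        Params={"Bucket": bucket, "Key": key},
        ExpiresIn=3600,
    )

if __name__ == "__main__":
    test_image = Image.new("RGB", (64, 64), color="black")  # stand-in for pipe(...) output
    print(save_image_to_s3(test_image, os.environ.get("S3_BUCKET", "afri")))

Reading the credentials from the environment keeps them out of the repository history, and the one-hour ExpiresIn matches the pre-signed URL lifetime used in the committed save_image_to_s3 helper.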