KingNish committed on
Commit 7aafe2f
1 Parent(s): 40462a0
Files changed (4)
  1. README.md +9 -7
  2. app.py +117 -4
  3. custom_pipeline.py +168 -0
  4. requirements.txt +7 -0
README.md CHANGED
@@ -1,12 +1,14 @@
  ---
- title: Realtime FLUX
- emoji: 🏃
- colorFrom: red
- colorTo: red
+ title: FLUX Realtime
+ emoji:
+ colorFrom: yellow
+ colorTo: pink
  sdk: gradio
- sdk_version: 4.44.0
+ sdk_version: 4.36.0
  app_file: app.py
- pinned: false
+ pinned: true
+ license: mit
+ short_description: High quality Images in Realtime
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,7 +1,120 @@
  import gradio as gr
+ import numpy as np
+ import random
+ import spaces
+ import torch
+ import time
+ from diffusers import DiffusionPipeline
+ from custom_pipeline import FLUXPipelineWithIntermediateOutputs

- def greet(name):
-     return "Hello " + name + "!!"
+ # Constants
+ MAX_SEED = np.iinfo(np.int32).max
+ MAX_IMAGE_SIZE = 2048
+ DEFAULT_WIDTH = 1024
+ DEFAULT_HEIGHT = 1024
+ DEFAULT_INFERENCE_STEPS = 1

- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
- demo.launch()
+ # Device and model setup
+ dtype = torch.float16
+ pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
+     "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
+ ).to("cuda")
+ torch.cuda.empty_cache()
+
+ # Inference function
+ @spaces.GPU(duration=25)
+ def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=DEFAULT_INFERENCE_STEPS):
+
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     generator = torch.Generator().manual_seed(seed)
+
+     start_time = time.time()
+
+     # Yield each intermediate image in the sequence as it is denoised
+     for img in pipe.generate_images(
+         prompt=prompt,
+         guidance_scale=0,
+         num_inference_steps=num_inference_steps,
+         width=width,
+         height=height,
+         generator=generator
+     ):
+         latency = f"Latency: {(time.time()-start_time):.2f} seconds"
+         yield img, seed, latency
+
+
+ # Example prompts
+ examples = [
+     "a tiny astronaut hatching from an egg on the moon",
+     "a cat holding a sign that says hello world",
+     "an anime illustration of a wiener schnitzel",
+     "a futuristic cityscape with flying cars and neon lights",
+     "Photo of a young woman with long, wavy brown hair tied in a bun and glasses. She has a fair complexion and is wearing subtle makeup, emphasizing her eyes and lips. She is dressed in a black top. The background appears to be an urban setting with a building facade, and the sunlight casts a warm glow on her face.",
+     "Imagine steve jobs as Star Wars movie character"
+ ]
+
+ # --- Gradio UI ---
+ with gr.Blocks() as demo:
+     with gr.Column(elem_id="app-container"):
+         gr.Markdown("# 🎨 Realtime FLUX Image Generator")
+         gr.Markdown("Generate stunning images in real-time with advanced AI technology.")
+
+         with gr.Row():
+             with gr.Column(scale=3):
+                 result = gr.Image(label="Generated Image", show_label=False, interactive=False)
+             with gr.Column(scale=1):
+                 prompt = gr.Text(
+                     label="Prompt",
+                     placeholder="Describe the image you want to generate...",
+                     lines=3,
+                     show_label=False,
+                     container=False,
+                 )
+                 enhanceBtn = gr.Button("🚀 Enhance Image")
+
+                 with gr.Column("Advanced Options"):
+                     with gr.Row():
+                         latency = gr.Text(show_label=False)
+                     with gr.Row():
+                         seed = gr.Number(label="Seed", value=42, precision=0)
+                         randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
+                     with gr.Row():
+                         width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
+                         height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
+                         num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)
+
+         with gr.Row():
+             gr.Markdown("### 🌟 Inspiration Gallery")
+         with gr.Row():
+             gr.Examples(
+                 examples=examples,
+                 fn=generate_image,
+                 inputs=[prompt],
+                 outputs=[result, seed],
+                 cache_examples="lazy"
+             )
+
+     # Event handling - Trigger image generation on button click or input change
+     enhanceBtn.click(
+         fn=generate_image,
+         inputs=[prompt, seed, width, height],
+         outputs=[result, seed, latency],
+         show_progress="hidden",
+         show_api=False,
+         queue=False
+     )
+
+     gr.on(
+         triggers=[prompt.input, width.input, height.input, num_inference_steps.input],
+         fn=generate_image,
+         inputs=[prompt, seed, width, height, randomize_seed, num_inference_steps],
+         outputs=[result, seed, latency],
+         show_progress="hidden",
+         show_api=False,
+         trigger_mode="always_last",
+         queue=False
+     )
+
+ # Launch the app
+ demo.launch()
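Note: `import spaces` and the `@spaces.GPU(duration=25)` decorator come from the Hugging Face ZeroGPU `spaces` package and only resolve inside a Space. A minimal sketch of a local fallback, not part of this commit, that keeps app.py importable on a plain CUDA machine:

    # Sketch: optional stand-in so app.py also runs outside a ZeroGPU Space,
    # where the `spaces` package is not installed. Assumption, not part of the commit.
    try:
        import spaces
    except ImportError:
        class spaces:  # mimics only the decorator shape used above
            @staticmethod
            def GPU(duration=60):
                def wrap(fn):
                    return fn
                return wrap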
custom_pipeline.py ADDED
@@ -0,0 +1,168 @@
+ import torch
+ import numpy as np
+ from diffusers import FluxPipeline, FlowMatchEulerDiscreteScheduler
+ from typing import Any, Dict, List, Optional, Union
+ from PIL import Image
+
+ # Constants for shift calculation
+ BASE_SEQ_LEN = 256
+ MAX_SEQ_LEN = 4096
+ BASE_SHIFT = 0.5
+ MAX_SHIFT = 1.2
+
+ # Helper functions
+ def calculate_timestep_shift(image_seq_len: int) -> float:
+     """Calculates the timestep shift (mu) based on the image sequence length."""
+     m = (MAX_SHIFT - BASE_SHIFT) / (MAX_SEQ_LEN - BASE_SEQ_LEN)
+     b = BASE_SHIFT - m * BASE_SEQ_LEN
+     mu = image_seq_len * m + b
+     return mu
+
+ def prepare_timesteps(
+     scheduler: FlowMatchEulerDiscreteScheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     mu: Optional[float] = None,
+ ) -> (torch.Tensor, int):
+     """Prepares the timesteps for the diffusion process."""
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed.")
+
+     if timesteps is not None:
+         scheduler.set_timesteps(timesteps=timesteps, device=device)
+     elif sigmas is not None:
+         scheduler.set_timesteps(sigmas=sigmas, device=device)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, mu=mu)
+
+     timesteps = scheduler.timesteps
+     num_inference_steps = len(timesteps)
+     return timesteps, num_inference_steps
+
+ # FLUX pipeline class
+ class FLUXPipelineWithIntermediateOutputs(FluxPipeline):
+     """
+     Extends the FluxPipeline to yield intermediate images during the denoising process
+     with progressively increasing resolution for faster generation.
+     """
+     @torch.inference_mode()
+     def generate_images(
+         self,
+         prompt: Union[str, List[str]] = None,
+         prompt_2: Optional[Union[str, List[str]]] = None,
+         height: Optional[int] = None,
+         width: Optional[int] = None,
+         num_inference_steps: int = 4,
+         timesteps: List[int] = None,
+         guidance_scale: float = 3.5,
+         num_images_per_prompt: Optional[int] = 1,
+         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+         latents: Optional[torch.FloatTensor] = None,
+         prompt_embeds: Optional[torch.FloatTensor] = None,
+         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+         max_sequence_length: int = 300,
+     ):
+         """Generates images and yields intermediate results during the denoising process."""
+         height = height or self.default_sample_size * self.vae_scale_factor
+         width = width or self.default_sample_size * self.vae_scale_factor
+
+         # 1. Check inputs
+         self.check_inputs(
+             prompt,
+             prompt_2,
+             height,
+             width,
+             prompt_embeds=prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds,
+             max_sequence_length=max_sequence_length,
+         )
+
+         self._guidance_scale = guidance_scale
+         self._joint_attention_kwargs = joint_attention_kwargs
+         self._interrupt = False
+
+         # 2. Define call parameters
+         batch_size = 1 if isinstance(prompt, str) else len(prompt)
+         device = self._execution_device
+
+         # 3. Encode prompt
+         lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+         prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+             prompt=prompt,
+             prompt_2=prompt_2,
+             prompt_embeds=prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds,
+             device=device,
+             num_images_per_prompt=num_images_per_prompt,
+             max_sequence_length=max_sequence_length,
+             lora_scale=lora_scale,
+         )
+         # 4. Prepare latent variables
+         num_channels_latents = self.transformer.config.in_channels // 4
+         latents, latent_image_ids = self.prepare_latents(
+             batch_size * num_images_per_prompt,
+             num_channels_latents,
+             height,
+             width,
+             prompt_embeds.dtype,
+             device,
+             generator,
+             latents,
+         )
+         # 5. Prepare timesteps
+         sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+         image_seq_len = latents.shape[1]
+         mu = calculate_timestep_shift(image_seq_len)
+         timesteps, num_inference_steps = prepare_timesteps(
+             self.scheduler,
+             num_inference_steps,
+             device,
+             timesteps,
+             sigmas,
+             mu=mu,
+         )
+         self._num_timesteps = len(timesteps)
+
+         # Handle guidance
+         guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float16).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
+
+         # 6. Denoising loop
+         for i, t in enumerate(timesteps):
+             if self.interrupt:
+                 continue
+
+             timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+             noise_pred = self.transformer(
+                 hidden_states=latents,
+                 timestep=timestep / 1000,
+                 guidance=guidance,
+                 pooled_projections=pooled_prompt_embeds,
+                 encoder_hidden_states=prompt_embeds,
+                 txt_ids=text_ids,
+                 img_ids=latent_image_ids,
+                 joint_attention_kwargs=self.joint_attention_kwargs,
+                 return_dict=False,
+             )[0]
+
+             # Yield intermediate result
+             latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+             yield self._decode_latents_to_image(latents, height, width, output_type)
+             torch.cuda.empty_cache()
+
+         # Final cleanup
+         self.maybe_free_model_hooks()
+         torch.cuda.empty_cache()
+
+     def _decode_latents_to_image(self, latents, height, width, output_type, vae=None):
+         """Decodes the given latents into an image."""
+         vae = vae or self.vae
+         latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+         latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
+         image = vae.decode(latents, return_dict=False)[0]
+         return self.image_processor.postprocess(image, output_type=output_type)[0]
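Taken on its own, the subclass can be used to watch the denoising progress frame by frame. A minimal sketch, assuming a CUDA device with enough memory for FLUX.1-schnell; the output filenames are hypothetical:

    # Sketch: standalone use of FLUXPipelineWithIntermediateOutputs (assumes CUDA + schnell weights).
    import torch
    from custom_pipeline import FLUXPipelineWithIntermediateOutputs

    pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
        "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.float16
    ).to("cuda")

    generator = torch.Generator().manual_seed(42)
    for step, image in enumerate(pipe.generate_images(
        prompt="a tiny astronaut hatching from an egg on the moon",
        guidance_scale=0,          # schnell is guidance-distilled, so CFG stays off
        num_inference_steps=4,
        width=1024, height=1024,
        generator=generator,
    )):
        image.save(f"step_{step}.png")  # one progressively refined frame per denoising step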
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ accelerate
+ git+https://github.com/huggingface/diffusers.git
+ torch
+ gradio
+ transformers
+ xformers
+ sentencepiece