Prgckwb committed
Commit 23f2de3
Parent: 0dbdb20

:tada: change example

Files changed (2):
  1. app.py +40 -77
  2. src/example.py +46 -0
app.py CHANGED
@@ -1,5 +1,3 @@
-import dataclasses
-
 import gradio as gr
 import spaces
 import torch
@@ -7,6 +5,8 @@ from PIL import Image
 from diffusers import DiffusionPipeline
 from diffusers.utils import make_image_grid
 
+from src.example import EXAMPLES
+
 DIFFUSERS_MODEL_IDS = [
     # SD Models
     "stabilityai/stable-diffusion-3-medium-diffusers",
@@ -22,50 +22,36 @@ EXTERNAL_MODEL_MAPPING = {
 }
 MODEL_CHOICES = DIFFUSERS_MODEL_IDS + list(EXTERNAL_MODEL_MAPPING.keys())
 
-current_model_id = "stabilityai/stable-diffusion-3-medium-diffusers"
 device = "cuda" if torch.cuda.is_available() else "cpu"
-pipe = None
-
-
-@dataclasses.dataclass
-class Input:
-    prompt: str
-    model_id: str = "stabilityai/stable-diffusion-3-medium-diffusers"
-    negative_prompt: str = ''
-    width: int = 1024
-    height: int = 1024
-    guidance_scale: float = 7.5
-    num_inference_step: int = 28
-    num_images: int = 4
-    use_safety_checker: bool = True
-    use_model_offload: bool = False
-    seed: int = 8888
-
-    def to_list(self):
-        return [
-            self.prompt, self.model_id, self.negative_prompt,
-            self.width, self.height, self.guidance_scale,
-            self.num_inference_step, self.num_images, self.use_safety_checker, self.use_model_offload,
-            self.seed
-        ]
 
 
-EXAMPLES = [
-    Input(prompt='A cat holding a sign that says Hello world').to_list(),
-    Input(
-        prompt='Beautiful pixel art of a Wizard with hovering text "Achivement unlocked: Diffusion models can spell now"'
-    ).to_list(),
-    Input(prompt='A corgi wearing sunglasses says "U-Net is OVER!!"').to_list(),
-    Input(
-        prompt='Cinematic Photo of a beautiful korean fashion model bokeh train',
-        model_id='Beautiful Realistic Asians',
-        negative_prompt='worst_quality, BadNegAnatomyV1-neg, bradhands cartoon, cgi, render, illustration, painting, drawing',
-        width=512,
-        height=512,
-        guidance_scale=5.0,
-        num_inference_step=50,
-    ).to_list()
-]
+def load_pipeline(model_id, use_model_offload, safety_checker):
+    # Model hosted in a Diffusers repository
+    if model_id in DIFFUSERS_MODEL_IDS:
+        pipe = DiffusionPipeline.from_pretrained(
+            model_id,
+            torch_dtype=torch.float16,
+        )
+
+    # Model originating from CIVITAI
+    else:
+        pipe = DiffusionPipeline.from_pretrained(
+            EXTERNAL_MODEL_MAPPING[model_id],
+            torch_dtype=torch.float16,
+        )
+
+        # Load Textual Inversion
+        pipe.load_textual_inversion('checkpoints/embeddings/BadNegAnatomyV1 neg.pt', token='BadNegAnatomyV1-neg')
+        pipe.load_textual_inversion('checkpoints/embeddings/Deep Negative V1 75T.pt', token='DeepNegativeV1')
+        pipe.load_textual_inversion('checkpoints/embeddings/easynegative.safetensors', token='EasyNegative')
+
+    # Workaround for low VRAM
+    if use_model_offload:
+        pipe.enable_model_cpu_offload()
+    else:
+        pipe = pipe.to(device)
+
+    return pipe
 
 
 @spaces.GPU(duration=120)
@@ -84,37 +70,8 @@ def inference(
         seed: int = 8888,
        progress=gr.Progress(track_tqdm=True),
 ) -> Image.Image:
-    progress(0, "Starting inference...")
-
-    global current_model_id, pipe
-
-    progress(0.1, 'Loading pipeline...')
-    if model_id not in DIFFUSERS_MODEL_IDS:
-        model_id = EXTERNAL_MODEL_MAPPING[model_id]
-
-    pipe = DiffusionPipeline.from_pretrained(
-        model_id,
-        torch_dtype=torch.float16,
-    )
-
-    current_model_id = model_id
-
-    if not safety_checker:
-        pipe.safety_checker = None
-
-    if model_id not in DIFFUSERS_MODEL_IDS:
-        progress(0.3, 'Loading Textual Inversion...')
-        # Load Textual Inversion
-        pipe.load_textual_inversion('checkpoints/embeddings/BadNegAnatomyV1 neg.pt', token='BadNegAnatomyV1-neg')
-        pipe.load_textual_inversion('checkpoints/embeddings/Deep Negative V1 75T.pt', token='DeepNegativeV1')
-        pipe.load_textual_inversion('checkpoints/embeddings/easynegative.safetensors', token='EasyNegative')
-
-    # Generation
-    progress(0.4, 'Generating images...')
-    if use_model_offload:
-        pipe.enable_model_cpu_offload()
-    else:
-        pipe = pipe.to('cuda')
+    progress(0, 'Loading pipeline...')
+    pipe = load_pipeline(model_id, use_model_offload, safety_checker)
 
     generator = torch.Generator(device=device).manual_seed(seed)
 
@@ -137,10 +94,12 @@ def inference(
     return image
 
 
-if __name__ == "__main__":
+def build_interface():
+    """Build Gradio Interface"""
+
     theme = gr.themes.Default(primary_hue=gr.themes.colors.emerald)
 
-    with gr.Blocks(theme=theme) as demo:
+    with gr.Blocks(theme=theme) as interface:
         gr.Markdown(f"# Stable Diffusion Demo")
 
         with gr.Row():
@@ -217,5 +176,9 @@ if __name__ == "__main__":
            fn=inference,
            cache_examples='lazy'
        )
+    return interface
 
-    demo.queue().launch()
+
+if __name__ == "__main__":
+    iface = build_interface()
+    iface.queue().launch()
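For reference, after this change app.py no longer keeps a global pipe; each request rebuilds the pipeline through load_pipeline and samples inside inference. The following is a minimal sketch of how the new helper would be driven outside Gradio, assuming it runs from the repo root with app.py importable; the model ID, prompt, and sampling settings are illustrative defaults mirrored from src/example.py, not part of this commit.

import torch

from app import load_pipeline
from diffusers.utils import make_image_grid

# Illustrative driver for the refactored helper; settings mirror the Example
# defaults in src/example.py rather than anything added by this commit.
pipe = load_pipeline(
    model_id="stabilityai/stable-diffusion-3-medium-diffusers",
    use_model_offload=False,
    safety_checker=True,
)

generator = torch.Generator(device="cuda").manual_seed(8888)
images = pipe(
    prompt="A cat holding a sign that says Hello world",
    negative_prompt="",
    width=1024,
    height=1024,
    guidance_scale=7.5,
    num_inference_steps=28,
    num_images_per_prompt=4,
    generator=generator,
).images

# Arrange the four samples into a 2x2 grid, as the Space does with make_image_grid.
make_image_grid(images, rows=2, cols=2).save("preview.png")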
src/example.py ADDED
@@ -0,0 +1,46 @@
+import dataclasses
+
+
+@dataclasses.dataclass
+class Example:
+    prompt: str
+    model_id: str = "stabilityai/stable-diffusion-3-medium-diffusers"
+    negative_prompt: str = ''
+    width: int = 1024
+    height: int = 1024
+    guidance_scale: float = 7.5
+    num_inference_step: int = 28
+    num_images: int = 4
+    use_safety_checker: bool = True
+    use_model_offload: bool = False
+    seed: int = 8888
+
+    def to_list(self):
+        return [
+            self.prompt, self.model_id, self.negative_prompt,
+            self.width, self.height, self.guidance_scale,
+            self.num_inference_step, self.num_images, self.use_safety_checker, self.use_model_offload,
+            self.seed
+        ]
+
+
+EXAMPLES = [
+    Example(
+        prompt='A cat holding a sign that says Hello world'
+    ).to_list(),
+    Example(
+        prompt='Beautiful pixel art of a Wizard with hovering text "Achivement unlocked: Diffusion models can spell now"'
+    ).to_list(),
+    Example(
+        prompt='A corgi wearing sunglasses says "U-Net is OVER!!"'
+    ).to_list(),
+    Example(
+        prompt='Cinematic Photo of a beautiful korean fashion model bokeh train',
+        model_id='Beautiful Realistic Asians',
+        negative_prompt='worst_quality, BadNegAnatomyV1-neg, bradhands cartoon, cgi, render, illustration, painting, drawing',
+        width=512,
+        height=512,
+        guidance_scale=5.0,
+        num_inference_step=50,
+    ).to_list()
+]
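The Example dataclass exists so each preset can be flattened, via to_list(), into the positional row format that gr.Examples expects. Below is a minimal sketch of that wiring; the component labels, ranges, and the dummy function are illustrative placeholders, not the components app.py actually declares in build_interface().

import gradio as gr

from src.example import EXAMPLES


def echo_prompt(*args):
    # Stand-in for app.py's inference(); simply echoes the prompt field.
    return args[0]


with gr.Blocks() as demo:
    # One input component per field emitted by Example.to_list(), in the same order.
    inputs = [
        gr.Textbox(label='Prompt'),
        gr.Dropdown(['stabilityai/stable-diffusion-3-medium-diffusers', 'Beautiful Realistic Asians'], label='Model ID'),
        gr.Textbox(label='Negative Prompt'),
        gr.Slider(256, 2048, step=64, label='Width'),
        gr.Slider(256, 2048, step=64, label='Height'),
        gr.Slider(0.0, 20.0, label='Guidance Scale'),
        gr.Slider(1, 100, step=1, label='Num Inference Steps'),
        gr.Slider(1, 8, step=1, label='Num Images'),
        gr.Checkbox(label='Use Safety Checker'),
        gr.Checkbox(label='Use Model Offload'),
        gr.Number(label='Seed'),
    ]
    output = gr.Textbox(label='Output')
    gr.Examples(
        examples=EXAMPLES,
        inputs=inputs,
        outputs=output,
        fn=echo_prompt,
        cache_examples='lazy',
    )

if __name__ == '__main__':
    demo.launch()

Keeping the presets in src/example.py leaves app.py focused on pipeline and UI wiring; adding a new preset only requires another Example(...) entry in EXAMPLES.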