kasper-boy committed on
Commit
995ccda
1 Parent(s): 524b9b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -11
app.py CHANGED
@@ -11,9 +11,7 @@ import torch
11
  from diffusers import DiffusionPipeline
12
  from typing import Tuple
13
 
14
- #Check for the Model Base..//
15
-
16
-
17
 
18
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
19
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
@@ -28,8 +26,6 @@ def check_text(prompt, negative=""):
28
  return True
29
  return False
30
 
31
-
32
-
33
  style_list = [
34
  {
35
  "name": "3840 x 2160",
@@ -82,15 +78,10 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
82
  return p.replace("{prompt}", positive), n + negative
83
 
84
 
85
-
86
  DESCRIPTIONs = """ㅤㅤㅤ """
87
 
88
  DESCRIPTION = """ㅤㅤㅤ """
89
 
90
-
91
-
92
-
93
-
94
  if not torch.cuda.is_available():
95
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
96
 
@@ -131,6 +122,22 @@ if torch.cuda.is_available():
131
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
132
  pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
133
  print("Model Compiled!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 
135
  def save_image(img):
136
  unique_name = str(uuid.uuid4()) + ".png"
@@ -318,4 +325,4 @@ with gr.Blocks(css=css, theme="xiaobaiyuan/theme_brief") as demo:
318
  )
319
  gr.Markdown(DESCRIPTION)
320
  if __name__ == "__main__":
321
- demo.queue(max_size=20).launch()
 
11
  from diffusers import DiffusionPipeline
12
  from typing import Tuple
13
 
14
+ # Check for the Model Base
 
 
15
 
16
  bad_words = json.loads(os.getenv('BAD_WORDS', "[]"))
17
  bad_words_negative = json.loads(os.getenv('BAD_WORDS_NEGATIVE', "[]"))
 
26
  return True
27
  return False
28
 
 
 
29
  style_list = [
30
  {
31
  "name": "3840 x 2160",
 
78
  return p.replace("{prompt}", positive), n + negative
79
 
80
 
 
81
  DESCRIPTIONs = """ㅤㅤㅤ """
82
 
83
  DESCRIPTION = """ㅤㅤㅤ """
84
 
 
 
 
 
85
  if not torch.cuda.is_available():
86
  DESCRIPTION += "\n<p>⚠️Running on CPU, This may not work on CPU.</p>"
87
 
 
122
  pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
123
  pipe2.unet = torch.compile(pipe2.unet, mode="reduce-overhead", fullgraph=True)
124
  print("Model Compiled!")
125
+ else:
126
+ pipe = DiffusionPipeline.from_pretrained(
127
+ "SG161222/RealVisXL_V4.0",
128
+ torch_dtype=torch.float32,
129
+ use_safetensors=True,
130
+ add_watermarker=False
131
+ )
132
+ pipe2 = DiffusionPipeline.from_pretrained(
133
+ "SG161222/RealVisXL_V3.0",
134
+ torch_dtype=torch.float32,
135
+ use_safetensors=True,
136
+ add_watermarker=False
137
+ )
138
+ pipe.to(device)
139
+ pipe2.to(device)
140
+ print("Loaded on Device!")
141
 
142
  def save_image(img):
143
  unique_name = str(uuid.uuid4()) + ".png"
 
325
  )
326
  gr.Markdown(DESCRIPTION)
327
  if __name__ == "__main__":
328
+ demo.queue(max_size=20).launch()