KingNish committed on
Commit
1a36e0e
1 Parent(s): bc20327

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -11
app.py CHANGED
@@ -10,7 +10,7 @@ from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EDMEulerSche
10
  from huggingface_hub import hf_hub_download, InferenceClient
11
 
12
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
13
- pipe = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, vae=vae)
14
  pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
15
  pipe.set_adapters("lora")
16
  pipe.to("cuda")
@@ -51,19 +51,28 @@ pipe_edit = StableDiffusionXLInstructPix2PixPipeline.from_single_file( edit_file
51
  pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
52
  pipe_edit.to("cuda")
53
 
 
 
 
 
 
 
 
 
54
  # Generator
55
  @spaces.GPU(duration=30, queue=False)
56
  def king(type ,
57
  input_image ,
58
  instruction: str ,
59
  negative_prompt: str ="",
 
60
  steps: int = 25,
61
  randomize_seed: bool = False,
62
  seed: int = 2404,
63
  width: int = 1024,
64
  height: int = 1024,
65
  guidance_scale: float = 6,
66
- fast=True,
67
  use_resolution_binning: bool = True,
68
  progress=gr.Progress(track_tqdm=True),
69
  ):
@@ -90,18 +99,27 @@ def king(type ,
90
  seed = random.randint(0, 999999)
91
  generator = torch.Generator().manual_seed(seed)
92
  if fast:
93
- pipes=pipe_fast
94
  steps=int(steps/2.5)
95
- guidance_scale=(guidance_scale/3)
 
 
 
 
 
 
 
96
  else:
97
- pipes=pipe
98
- image = pipes( prompt = instruction,
 
 
 
99
  negative_prompt=negative_prompt,
100
- guidance_scale = guidance_scale,
101
  num_inference_steps = steps,
102
  width = width, height = height,
103
  generator = generator, output_type="latent",
104
- ).images
105
 
106
  refine = refiner( prompt=instruction,
107
  negative_prompt = negative_prompt,
@@ -180,9 +198,9 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
180
  generate_button = gr.Button("Run", scale=0)
181
  with gr.Row():
182
  type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
183
- enhance_prompt = gr.Checkbox(label="Enhance prompt", value = True, scale=0)
184
- fast = gr.Checkbox(label="FAST Gen", value=True, scale=0)
185
-
186
  with gr.Row():
187
  input_image = gr.Image(label="Image", type='filepath', interactive=True)
188
 
@@ -235,6 +253,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
235
  input_image,
236
  instruction,
237
  negative_prompt,
 
238
  steps,
239
  randomize_seed,
240
  seed,
 
10
  from huggingface_hub import hf_hub_download, InferenceClient
11
 
12
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
13
+ pipe = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V3.0", torch_dtype=torch.float16, vae=vae)
14
  pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
15
  pipe.set_adapters("lora")
16
  pipe.to("cuda")
 
51
  pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
52
  pipe_edit.to("cuda")
53
 
54
+ def promptifier(prompt):
55
+ client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
56
+ system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, reply with prompt only, Your task is to reply with final prompt in SDXL image generation format only. [USER]"
57
+ formatted_prompt = f"{system_instructions1} {prompt} [FINAL_PROMPT]"
58
+ stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
59
+ instructions = "".join([response.token.text for response in stream if response.token.text != "</s>"])
60
+ return instructions
61
+
62
  # Generator
63
  @spaces.GPU(duration=30, queue=False)
64
  def king(type ,
65
  input_image ,
66
  instruction: str ,
67
  negative_prompt: str ="",
68
+ enhance_prompt: bool = True,
69
  steps: int = 25,
70
  randomize_seed: bool = False,
71
  seed: int = 2404,
72
  width: int = 1024,
73
  height: int = 1024,
74
  guidance_scale: float = 6,
75
+ fast=False,
76
  use_resolution_binning: bool = True,
77
  progress=gr.Progress(track_tqdm=True),
78
  ):
 
99
  seed = random.randint(0, 999999)
100
  generator = torch.Generator().manual_seed(seed)
101
  if fast:
 
102
  steps=int(steps/2.5)
103
+ guidance_scale2=(guidance_scale/3)
104
+
105
+ image = pipe_fast( prompt = instruction,
106
+ guidance_scale = guidance_scale2,
107
+ num_inference_steps = steps,
108
+ width = width, height = height,
109
+ generator = generator, output_type="latent",
110
+ ).images
111
  else:
112
+ guidance_scale2=(guidance_scale/2)
113
+ if enhance_prompt:
114
+ instruction = promptifier(instruction)
115
+
116
+ image = pipe( prompt = instruction,
117
  negative_prompt=negative_prompt,
118
+ guidance_scale = guidance_scale2,
119
  num_inference_steps = steps,
120
  width = width, height = height,
121
  generator = generator, output_type="latent",
122
+ ).images
123
 
124
  refine = refiner( prompt=instruction,
125
  negative_prompt = negative_prompt,
 
198
  generate_button = gr.Button("Run", scale=0)
199
  with gr.Row():
200
  type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
201
+ enhance_prompt = gr.Checkbox(label="Enhance prompt", value=True, scale=0)
202
+ fast = gr.Checkbox(label="FAST Generation", value=False, scale=0)
203
+
204
  with gr.Row():
205
  input_image = gr.Image(label="Image", type='filepath', interactive=True)
206
 
 
253
  input_image,
254
  instruction,
255
  negative_prompt,
256
+ enhance_prompt,
257
  steps,
258
  randomize_seed,
259
  seed,