Spaces: Running on Zero
optimized promptifier
app.py CHANGED
@@ -52,9 +52,9 @@ pipe_edit.to("cuda")
 
 def promptifier(prompt):
     client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
-    system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL
+    system_instructions1 = "<s>[SYSTEM] Act as Image Prompt Generation expert, Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL. \n Modify the user's prompt to generate a high-quality image by incorporating essential keywords and styles. The optimized prompt should include specifications for resolution (4K, HD, 16:9 aspect ratio), image quality (cute, masterpiece, high-quality, vivid colors, intricate details), and desired art styles (realistic, anime, 3D, logo, futuristic, fantasy). Additionally, consider incorporating themes (e.g. futuristic cityscape, magical forest, abstract art), emotions (e.g. serene, energetic, whimsical), and specific objects or characters (e.g. dragons, robots, landscapes). Ensure the prompt is concise, yet comprehensive, to generate an exceptional image that meets the user's expectations. \n Your task is to reply with final optimized prompt only. If you get big prompt make it concise.[USER]"
     formatted_prompt = f"{system_instructions1} {prompt} [OPTIMIZED_PROMPT]"
-    stream = client1.text_generation(formatted_prompt, max_new_tokens=
+    stream = client1.text_generation(formatted_prompt, max_new_tokens=100, stream=True, details=True, return_full_text=False)
     return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
 # Generator
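The rewritten promptifier streams its completion from Mistral through huggingface_hub's InferenceClient: max_new_tokens=100 caps the rewrite, details=True exposes each generated token as .token.text, and return_full_text=False keeps the long system prompt out of the output. A minimal standalone sketch of the same pattern (the system prompt and function name here are shortened/illustrative, not the Space's exact text):

from huggingface_hub import InferenceClient

def promptify_sketch(prompt: str) -> str:
    client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
    # stream=True with details=True yields token events; return_full_text=False
    # stops the server from echoing the input prompt back in the output.
    stream = client.text_generation(
        f"<s>[SYSTEM] Rewrite the USER prompt for SDXL. [USER] {prompt} [OPTIMIZED_PROMPT]",
        max_new_tokens=100, stream=True, details=True, return_full_text=False,
    )
    # skip the literal end-of-sequence token Mistral emits
    return "".join(event.token.text for event in stream if event.token.text != "</s>")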
@@ -74,21 +74,22 @@ def king(type ,
         progress=gr.Progress(track_tqdm=True)
     ):
     if type=="Image Editing" :
-        raw_image = Image.open(input_image).convert('RGB')
         if randomize_seed:
             seed = random.randint(0, 999999)
         generator = torch.manual_seed(seed)
         output_image = pipe_edit(
-            instruction, negative_prompt=negative_prompt, image=
+            instruction, negative_prompt=negative_prompt, image=input_image,
             guidance_scale=guidance_scale, image_guidance_scale=1.5,
             num_inference_steps=steps, generator=generator, output_type="latent",
         ).images
         refine = refiner(
-            prompt=instruction,
-
+            prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
+            negative_prompt = negative_prompt,
+            guidance_scale=7.5,
             num_inference_steps=steps,
             image=output_image,
             generator=generator,
+            width = input_image.width, height = input_image.height,
         ).images[0]
         return seed, refine
     else :
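The editing branch no longer opens the upload itself (the raw_image line is gone) and instead passes the PIL image straight into pipe_edit, which returns latents (output_type="latent") that the SDXL refiner consumes directly; the refiner call also gains a quality-keyword suffix, a fixed guidance scale, and explicit width/height read from the input image. A consolidated sketch of that two-stage handoff, assuming the module-level pipe_edit and refiner pipelines defined earlier in app.py:

import random
import torch

def edit_and_refine(instruction, input_image, negative_prompt, steps,
                    guidance_scale, seed, randomize_seed):
    if randomize_seed:
        seed = random.randint(0, 999999)
    generator = torch.manual_seed(seed)  # one seed drives both stages
    latents = pipe_edit(
        instruction, negative_prompt=negative_prompt, image=input_image,
        guidance_scale=guidance_scale, image_guidance_scale=1.5,
        num_inference_steps=steps, generator=generator,
        output_type="latent",  # skip the VAE decode; hand latents to the refiner
    ).images
    refined = refiner(
        prompt=f"{instruction}, 4k, hd, high quality, masterpiece",
        negative_prompt=negative_prompt,
        guidance_scale=7.5,
        num_inference_steps=steps,
        image=latents, generator=generator,
        # input_image is a PIL.Image, so .width/.height exist; see the
        # gr.Image change further down
        width=input_image.width, height=input_image.height,
    ).images[0]
    return seed, refined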
@@ -118,7 +119,7 @@ def king(type ,
 
         refine = refiner( prompt=instruction,
                 negative_prompt = negative_prompt,
-                guidance_scale =
+                guidance_scale = 7.5,
                 num_inference_steps= steps,
                 image=image, generator=generator,
                 ).images[0]
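Both refiner calls now pass a fixed guidance_scale of 7.5 rather than the user-supplied value. In classifier-free guidance the denoiser runs once without and once with the prompt, and the scale extrapolates between the two predictions; schematically (tensor names here are illustrative, not diffusers internals):

import torch

def apply_cfg(noise_uncond: torch.Tensor, noise_cond: torch.Tensor,
              guidance_scale: float = 7.5) -> torch.Tensor:
    # 1.0 disables guidance; larger values follow the prompt more strongly
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)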
@@ -202,7 +203,7 @@ with gr.Blocks(css=css) as demo:
         fast = gr.Checkbox(label="FAST Generation", value=True, scale=0)
 
         with gr.Row():
-            input_image = gr.Image(label="Image", type='
+            input_image = gr.Image(label="Image", type='pil', interactive=True)
 
         with gr.Row():
             guidance_scale = gr.Number(value=6.0, step=0.1, label="Guidance Scale", interactive=True)
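Switching the component to type='pil' is what lets the handler drop Image.open(...) and read input_image.width/.height directly: Gradio converts the upload to a PIL.Image before invoking the callback. A small self-contained demo of that behavior:

import gradio as gr

def describe(img):
    if img is None:
        return ""
    # img arrives as a PIL.Image.Image because the component uses type='pil'
    return f"{img.width}x{img.height}, mode={img.mode}"

with gr.Blocks() as demo:
    inp = gr.Image(label="Image", type='pil', interactive=True)
    out = gr.Textbox(label="Info")
    inp.change(describe, inputs=inp, outputs=out)

demo.launch()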