Update app.py
app.py
CHANGED
@@ -259,7 +259,7 @@ def inference(input_prompt, input_category):
     seed_everything(opt.seed)
 
     tic = time.time()
-    config =
+    config = x.load(f"{opt.config}")
     model = load_model_from_config(config, f"{opt.sd_ckpt}")
     device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
     model = model.to(device)
@@ -359,7 +359,9 @@ def inference(input_prompt, input_category):
     # cv2.imwrite(os.path.join("demo/demo_mask.png"), done_image_mask)
 
     # torchvision.utils.save_image(annotation_pred, os.path.join("demo/demo_segresult.png"), normalize=True, scale_each=True)
-
+    generated_image = x_sample
+    generated_mask = done_image_mask
+    return [generated_image, generated_mask]
 
 
 # def make_transparent_foreground(pic, mask):
@@ -419,14 +421,154 @@ def inference(input_prompt, input_category):
 # model = torch.hub.load('pytorch/vision:v0.6.0', 'deeplabv3_resnet101', pretrained=True)
 # model.eval()
 
-gr.Interface(
-    inference,
-    gr.inputs.Textbox(label='Prompt', default='a photo of a lion on a mountain top at sunset'),
-    gr.inputs.Textbox(label='category', default='lion'),
-    gr.outputs.Image(type="pil", label="Output"),
-    # title=title,
-    # description=description,
-    # article=article,
-    # examples=[['demis.jpg'], ['lifeifei.png']],
-    # enable_queue=True
-).launch(debug=False)
+# gr.Interface(
+#     inference,
+#     gr.inputs.Textbox(label='Prompt', default='a photo of a lion on a mountain top at sunset'),
+#     gr.inputs.Textbox(label='category', default='lion'),
+#     gr.outputs.Image(type="pil", label="Output"),
+#     # title=title,
+#     # description=description,
+#     # article=article,
+#     # examples=[['demis.jpg'], ['lifeifei.png']],
+#     # enable_queue=True
+# ).launch(debug=False)
+
+def main():
+
+    # def load_example(
+    #     steps: int,
+    #     randomize_seed: bool,
+    #     seed: int,
+    #     randomize_cfg: bool,
+    #     text_cfg_scale: float,
+    #     image_cfg_scale: float,
+    # ):
+    #     example_instruction = random.choice(example_instructions)
+    #     return [example_image, example_instruction] + generate(
+    #         example_image,
+    #         example_instruction,
+    #         steps,
+    #         randomize_seed,
+    #         seed,
+    #         randomize_cfg,
+    #         text_cfg_scale,
+    #         image_cfg_scale,
+    #     )
+
+    # def generate(
+    #     input_image: Image.Image,
+    #     instruction: str,
+    #     steps: int,
+    #     randomize_seed: bool,
+    #     seed: int,
+    #     randomize_cfg: bool,
+    #     text_cfg_scale: float,
+    #     image_cfg_scale: float,
+    # ):
+    #     seed = random.randint(0, 100000) if randomize_seed else seed
+    #     text_cfg_scale = round(random.uniform(6.0, 9.0), ndigits=2) if randomize_cfg else text_cfg_scale
+    #     image_cfg_scale = round(random.uniform(1.2, 1.8), ndigits=2) if randomize_cfg else image_cfg_scale
+
+    #     width, height = input_image.size
+    #     factor = 512 / max(width, height)
+    #     factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
+    #     width = int((width * factor) // 64) * 64
+    #     height = int((height * factor) // 64) * 64
+    #     input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS)
+
+    #     if instruction == "":
+    #         return [input_image, seed]
+
+    #     generator = torch.manual_seed(seed)
+    #     edited_image = pipe(
+    #         instruction, image=input_image,
+    #         guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale,
+    #         num_inference_steps=steps, generator=generator,
+    #     ).images[0]
+    #     return [seed, text_cfg_scale, image_cfg_scale, edited_image]
+
+    # def reset():
+    #     return [0, "Randomize Seed", 1371, "Fix CFG", 7.5, 1.5, None]
+
+    with gr.Blocks() as demo:
+        gr.HTML("""<h1 style="font-weight: 900; margin-bottom: 7px;">
+            InstructPix2Pix: Learning to Follow Image Editing Instructions
+            </h1>
+            <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings.
+            <br/>
+            <a href="https://huggingface.co/spaces/timbrooks/instruct-pix2pix?duplicate=true">
+            <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
+            <p/>""")
+        with gr.Row():
+            # with gr.Column(scale=1, min_width=100):
+            #     load_button = gr.Button("Load Example")
+            # with gr.Column(scale=1, min_width=100):
+            #     reset_button = gr.Button("Reset")
+            with gr.Column(scale=3):
+                Prompt = gr.Textbox(lines=1, label="Prompt", interactive=True)
+            with gr.Column(scale=2):
+                Category = gr.Textbox(lines=1, label="Category", interactive=True)
+            with gr.Column(scale=1, min_width=100):
+                generate_button = gr.Button("Generate")
+
+        with gr.Row():
+            generated_image = gr.Image(label="Generated Image", type="pil", interactive=False)
+            generated_mask = gr.Image(label=f"Generated Mask", type="pil", interactive=False)
+            generated_image.style(height=512, width=512)
+            generated_mask.style(height=512, width=512)
+
+        # with gr.Row():
+        #     steps = gr.Number(value=50, precision=0, label="Steps", interactive=True)
+        #     randomize_seed = gr.Radio(
+        #         ["Fix Seed", "Randomize Seed"],
+        #         value="Randomize Seed",
+        #         type="index",
+        #         show_label=False,
+        #         interactive=True,
+        #     )
+        #     seed = gr.Number(value=1371, precision=0, label="Seed", interactive=True)
+        #     randomize_cfg = gr.Radio(
+        #         ["Fix CFG", "Randomize CFG"],
+        #         value="Fix CFG",
+        #         type="index",
+        #         show_label=False,
+        #         interactive=True,
+        #     )
+        #     text_cfg_scale = gr.Number(value=7.5, label=f"Text CFG", interactive=True)
+        #     image_cfg_scale = gr.Number(value=1.5, label=f"Image CFG", interactive=True)
+
+        gr.Markdown(help_text)
+
+        # load_button.click(
+        #     fn=load_example,
+        #     inputs=[
+        #         steps,
+        #         randomize_seed,
+        #         seed,
+        #         randomize_cfg,
+        #         text_cfg_scale,
+        #         image_cfg_scale,
+        #     ],
+        #     outputs=[input_image, instruction, seed, text_cfg_scale, image_cfg_scale, edited_image],
+        # )
+        generate_button.click(
+            fn=inference,
+            inputs=[
+                Prompt,
+                Category,
+            ],
+            outputs=[generated_image, generated_mask],
+        )
+        # reset_button.click(
+        #     fn=reset,
+        #     inputs=[],
+        #     outputs=[steps, randomize_seed, seed, randomize_cfg, text_cfg_scale, image_cfg_scale, edited_image],
+        # )
+
+    demo.queue(concurrency_count=1)
+    demo.launch(share=False)
+
+
+
+if __name__ == "__main__":
+    main()
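
The commit replaces the old top-level gr.Interface(...).launch(...) call with a main() that lays the demo out in gr.Blocks: two textboxes feed inference, and its [generated_image, generated_mask] return value maps positionally onto the two gr.Image outputs. Below is a minimal runnable sketch of that same wiring; the placeholder inference (blank PIL images) is an illustrative assumption standing in for the diffusion pipeline above, not part of the commit.

import gradio as gr
from PIL import Image

def inference(prompt: str, category: str):
    # Placeholder for the app's real handler, which returns
    # [generated_image, generated_mask] as two PIL images.
    image = Image.new("RGB", (512, 512), "gray")
    mask = Image.new("L", (512, 512), 255)
    return [image, mask]

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=3):
            prompt_box = gr.Textbox(lines=1, label="Prompt", interactive=True)
        with gr.Column(scale=2):
            category_box = gr.Textbox(lines=1, label="Category", interactive=True)
        with gr.Column(scale=1, min_width=100):
            generate_button = gr.Button("Generate")
    with gr.Row():
        generated_image = gr.Image(label="Generated Image", type="pil", interactive=False)
        generated_mask = gr.Image(label="Generated Mask", type="pil", interactive=False)
    # The handler's two return values map positionally onto the two outputs.
    generate_button.click(
        fn=inference,
        inputs=[prompt_box, category_box],
        outputs=[generated_image, generated_mask],
    )

demo.queue()
demo.launch(share=False)

Note that generated_image.style(height=512, width=512) and demo.queue(concurrency_count=1) in the diff are Gradio 3.x APIs (later releases moved sizing into gr.Image(height=..., width=...) and renamed the queue parameter), so the sketch omits them. concurrency_count=1 caps the Space to one generation at a time, a common choice for a single-GPU diffusion demo.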