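# Gradio demo: single-image 3D human digitization with PIFuHD.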
import gradio as gr
import numpy as np
from PIFuHD.data import EvalWMetaDataset
from PIFuHD.data.ImageBundle import ImageBundle
from PIFuHD.options import BaseOptions
from PIFuHD.recontructor import Reconstructor
from huggingface_hub import hf_hub_download
from human_pose_estimator import PoseEstimator
from estimator import rect

REPO_ID = "cxeep/PIFuHD"

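# Human pose estimator (running on CPU), used below to find the person's bounding rectangles in the input image.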
pose_estimator = PoseEstimator("cpu")

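# Download the pretrained PIFuHD weights from the Hugging Face Hub.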
checkpoint_path = hf_hub_download(repo_id=REPO_ID, filename="pifuhd.pt")

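# PIFuHD reconstruction options, expressed as an argv-style list for the BaseOptions parser.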
cmd = [
    '--dataroot', './data',
    '--results_path', './results',
    '--loadSize', '1024',
    '--resolution', '256',
    '--load_netMR_checkpoint_path', checkpoint_path,
    '--start_id', '-1',
    '--end_id', '-1'
]

parser = BaseOptions()
opts = parser.parse(cmd)
reconstructor = Reconstructor(opts)


def make_bundle(image, name):
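    """Detect the person with the pose estimator and wrap the image and its rectangles into an ImageBundle."""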
    image, rects = rect(pose_estimator, image)
    return ImageBundle(img=image, name=name, meta=rects)


def predict(img: np.ndarray):
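    """Run PIFuHD on a single image.

    Returns the rendered preview image and the reconstructed mesh path,
    the latter twice: once for the 3D viewer and once for the file download.
    """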
    bundle = make_bundle(img, "Model3D")
    dataset = EvalWMetaDataset(opts, [bundle])
    img, model = reconstructor.evaluate(dataset)
    return img, model, model


footer = r"""
<center>
<b>
Demo for <a href='https://github.com/facebookresearch/pifuhd'>PIFuHD</a>
</b>
</center>
"""

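# Gradio UI: input image and run button on the left; rendered preview, 3D viewer, and mesh download on the right.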
with gr.Blocks(title="PIFuHD") as app:
    gr.HTML("<center><h1>3D Human Digitization</h1></center>")
    gr.HTML("<center><h3>PIFuHD: Multi-Level Pixel-Aligned Implicit Function for High-Resolution 3D Human Digitization (CVPR 2020)</h3></center>")
    with gr.Row(equal_height=False):
        with gr.Column():
            input_img = gr.Image(type="numpy", label="Input image")
            run_btn = gr.Button(variant="primary")
        with gr.Column():
            output_obj = gr.Model3D(label="Output model")
            output_img = gr.Image(type="filepath", label="Output image")
            output_file = gr.File(label="Download 3D Model")
            gr.ClearButton(components=[input_img, output_img, output_obj, output_file], variant="stop")

    run_btn.click(predict, [input_img], [output_img, output_obj, output_file])

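    # Clickable example images; selecting a sample loads it into the input component.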
    with gr.Row():
        blobs = [[f"examples/{x:02d}.png"] for x in range(1, 4)]
        examples = gr.Dataset(components=[input_img], samples=blobs)
        examples.click(lambda x: x[0], [examples], [input_img])

    with gr.Row():
        gr.HTML(footer)

# Enable the request queue before launching; launch() blocks with debug=True, so queue() must come first.
app.queue()
app.launch(share=False, debug=True, show_error=True)