Spaces: Running on Zero

Commit • 1527335
Parent(s): 863d92e

det2 (#6)
det2 (7aefd5a7a5801098d4b47a97d370e8879429dd6c)
Co-authored-by: Minho Park <[email protected]>

This view is limited to 50 files because it contains too many changes. See raw diff.
- app.py +37 -22
- preprocess/detectron2/projects/DensePose/apply_net.py → apply_net.py +69 -126
- densepose +0 -1
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/config.py +0 -0
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/base.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/builtin.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/chart_output_hflip.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/chart_output_to_chart_result.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/hflip.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/segm_to_mask.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/to_chart_result.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/to_mask.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/build.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/combined_loader.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/dataset_mapper.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/builtin.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/chimpnsee.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/coco.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/dataset_type.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/lvis.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/image_list_dataset.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/inference_based_loader.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/meshes/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/meshes/builtin.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/meshes/catalog.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_base.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_confidence_based.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_cse_base.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_cse_confidence_based.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_cse_uniform.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_uniform.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/mask_from_densepose.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/prediction_to_gt.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/transform/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/transform/image.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/utils.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/video/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/video/frame_selector.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/data/video/video_keyframe_dataset.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/engine/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/engine/trainer.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/__init__.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/d2_evaluator_adapter.py +0 -2
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/densepose_coco_evaluation.py +0 -3
- {preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/evaluator.py +0 -2
app.py
CHANGED
@@ -2,23 +2,25 @@
 import os
 import sys
 import time
-from pathlib import Path
-from omegaconf import OmegaConf
 from glob import glob
 from os.path import join as opj
+from pathlib import Path
 
+import apply_net
 import gradio as gr
-from PIL import Image
 import torch
+from omegaconf import OmegaConf
+from PIL import Image
 
-from utils_stableviton import get_mask_location, get_batch, tensor2img
 from cldm.model import create_model
 from cldm.plms_hacked import PLMSSampler
+from detectron2.data.detection_utils import _apply_exif_orientation, convert_PIL_to_numpy
+from utils_stableviton import get_batch, get_mask_location, tensor2img
 
 PROJECT_ROOT = Path(__file__).absolute().parents[1].absolute()
 sys.path.insert(0, str(PROJECT_ROOT))
 
-from
+# from detectron2.projects.DensePose.apply_net_gradio import DensePose4Gradio
 from preprocess.humanparsing.run_parsing import Parsing
 from preprocess.openpose.run_openpose import OpenPose
 
@@ -30,10 +32,10 @@ IMG_W = 384
 openpose_model_hd = OpenPose(0)
 openpose_model_hd.preprocessor.body_estimation.model.to('cuda')
 parsing_model_hd = Parsing(0)
-densepose_model_hd = DensePose4Gradio(
-    cfg='preprocess/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml',
-    model='https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl',
-)
+# densepose_model_hd = DensePose4Gradio(
+#     cfg='preprocess/detectron2/projects/DensePose/configs/densepose_rcnn_R_50_FPN_s1x.yaml',
+#     model='https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl',
+# )
 
 category_dict = ['upperbody', 'lowerbody', 'dress']
 category_dict_utils = ['upper_body', 'lower_body', 'dresses']
@@ -50,6 +52,8 @@ model = model.cuda()
 model.eval()
 sampler = PLMSSampler(model)
 # #### model init <<<<
+
+
 def stable_viton_model_hd(
     batch,
     n_steps,
@@ -69,25 +73,27 @@ def stable_viton_model_hd(
     sampler.model.batch = batch
 
     ts = torch.full((1,), 999, device=z.device, dtype=torch.long)
-    start_code = model.q_sample(z, ts)
+    start_code = model.q_sample(z, ts)
 
     output, _, _ = sampler.sample(
         n_steps,
         bs,
-        (4, IMG_H//8, IMG_W//8),
+        (4, IMG_H // 8, IMG_W // 8),
         cond,
-        x_T=start_code,
+        x_T=start_code,
         verbose=False,
         eta=0.0,
-        unconditional_conditioning=uc_full,
+        unconditional_conditioning=uc_full,
     )
 
     output = model.decode_first_stage(output)
     output = tensor2img(output)
     pil_output = Image.fromarray(output)
     return pil_output
-
+
 # @spaces.GPU # TODO: turn on when final upload
+
+
 @torch.no_grad()
 def process_hd(vton_img, garm_img, n_steps):
     model_type = 'hd'
@@ -112,19 +118,28 @@ def process_hd(vton_img, garm_img, n_steps):
     stt = time.time()
     print('get densepose... ', end='')
     vton_img = vton_img.resize((IMG_W, IMG_H))  # size for densepose
-    densepose = densepose_model_hd.execute(vton_img)  # densepose
+    # densepose = densepose_model_hd.execute(vton_img)  # densepose
+
+    human_img_arg = _apply_exif_orientation(vton_img.resize((IMG_W, IMG_H)))
+    human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
+    args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
+    # verbosity = getattr(args, "verbosity", None)
+    pose_img = args.func(args, human_img_arg)
+    pose_img = pose_img[:, :, ::-1]
+    pose_img = Image.fromarray(pose_img).resize((IMG_W, IMG_H))
+
     print('%.2fs' % (time.time() - stt))
 
     batch = get_batch(
-        vton_img,
-        garm_img,
-        densepose,
-        masked_vton_img,
-        mask,
-        IMG_H,
+        vton_img,
+        garm_img,
+        densepose,
+        masked_vton_img,
+        mask,
+        IMG_H,
         IMG_W
     )
-
+
     sample = stable_viton_model_hd(
         batch,
         n_steps
preprocess/detectron2/projects/DensePose/apply_net.py → apply_net.py
RENAMED
@@ -7,29 +7,40 @@ import logging
 import os
 import sys
 from typing import Any, ClassVar, Dict, List
-
-import cv2
-import numpy as np
 import torch
-
-from densepose.structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
-from densepose.utils.logger import verbosity_to_level
-from densepose.vis.base import CompoundVisualizer
-from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
-from densepose.vis.densepose_outputs_vertex import (DensePoseOutputsTextureVisualizer, DensePoseOutputsVertexVisualizer,
-                                                    get_texture_atlases)
-from densepose.vis.densepose_results import (DensePoseResultsContourVisualizer,
-                                             DensePoseResultsFineSegmentationVisualizer, DensePoseResultsUVisualizer,
-                                             DensePoseResultsVVisualizer)
-from densepose.vis.densepose_results_textures import DensePoseResultsVisualizerWithTexture, get_texture_atlas
-from densepose.vis.extractor import (CompoundExtractor, DensePoseOutputsExtractor, DensePoseResultExtractor,
-                                     create_extractor)
+
 from detectron2.config import CfgNode, get_cfg
 from detectron2.data.detection_utils import read_image
 from detectron2.engine.defaults import DefaultPredictor
 from detectron2.structures.instances import Instances
 from detectron2.utils.logger import setup_logger
-
+
+from densepose import add_densepose_config
+from densepose.structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
+from densepose.utils.logger import verbosity_to_level
+from densepose.vis.base import CompoundVisualizer
+from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
+from densepose.vis.densepose_outputs_vertex import (
+    DensePoseOutputsTextureVisualizer,
+    DensePoseOutputsVertexVisualizer,
+    get_texture_atlases,
+)
+from densepose.vis.densepose_results import (
+    DensePoseResultsContourVisualizer,
+    DensePoseResultsFineSegmentationVisualizer,
+    DensePoseResultsUVisualizer,
+    DensePoseResultsVVisualizer,
+)
+from densepose.vis.densepose_results_textures import (
+    DensePoseResultsVisualizerWithTexture,
+    get_texture_atlas,
+)
+from densepose.vis.extractor import (
+    CompoundExtractor,
+    DensePoseOutputsExtractor,
+    DensePoseResultExtractor,
+    create_extractor,
+)
 
 DOC = """Apply Net - a tool to print / visualize DensePose results
 """
@@ -40,9 +51,9 @@ logger = logging.getLogger(LOGGER_NAME)
 _ACTION_REGISTRY: Dict[str, "Action"] = {}
 
 
-class Action(object):
+class Action:
     @classmethod
-    def add_arguments(cls, parser: argparse.ArgumentParser):
+    def add_arguments(cls: type, parser: argparse.ArgumentParser):
         parser.add_argument(
             "-v",
             "--verbosity",
@@ -51,7 +62,7 @@ class Action(object):
         )
 
 
-def register_action(cls):
+def register_action(cls: type):
     """
     Decorator for action classes to automate action registration
     """
@@ -62,11 +73,10 @@ def register_action(cls):
 
 class InferenceAction(Action):
     @classmethod
-    def add_arguments(cls, parser: argparse.ArgumentParser):
+    def add_arguments(cls: type, parser: argparse.ArgumentParser):
         super(InferenceAction, cls).add_arguments(parser)
         parser.add_argument("cfg", metavar="<config>", help="Config file")
         parser.add_argument("model", metavar="<model>", help="Model file")
-        parser.add_argument("input", metavar="<input>", help="Input data")
        parser.add_argument(
             "--opts",
             help="Modify config options using the command-line 'KEY VALUE' pairs",
@@ -75,28 +85,29 @@ class InferenceAction(Action):
         )
 
     @classmethod
-    def execute(cls, args: argparse.Namespace):
+    def execute(cls: type, args: argparse.Namespace, human_img):
         logger.info(f"Loading config from {args.cfg}")
         opts = []
         cfg = cls.setup_config(args.cfg, args.model, args, opts)
         logger.info(f"Loading model from {args.model}")
         predictor = DefaultPredictor(cfg)
-        logger.info(f"Loading data from {args.input}")
-        file_list = cls._get_input_file_list(args.input)
-        if len(file_list) == 0:
-            logger.warning(f"No input images for {args.input}")
-            return
+        # logger.info(f"Loading data from {args.input}")
+        # file_list = cls._get_input_file_list(args.input)
+        # if len(file_list) == 0:
+        #     logger.warning(f"No input images for {args.input}")
+        #     return
         context = cls.create_context(args, cfg)
-        for file_name in file_list:
-            img = read_image(file_name, format="BGR")  # predictor expects BGR image.
-            with torch.no_grad():
-                outputs = predictor(img)["instances"]
-                cls.execute_on_outputs(context, {"file_name": file_name, "image": img}, outputs)
+        # for file_name in file_list:
+        #     img = read_image(file_name, format="BGR")  # predictor expects BGR image.
+        with torch.no_grad():
+            outputs = predictor(human_img)["instances"]
+            out_pose = cls.execute_on_outputs(context, {"image": human_img}, outputs)
         cls.postexecute(context)
+        return out_pose
 
     @classmethod
     def setup_config(
-        cls, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
+        cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
     ):
         cfg = get_cfg()
         add_densepose_config(cfg)
@@ -109,7 +120,7 @@ class InferenceAction(Action):
         return cfg
 
     @classmethod
-    def _get_input_file_list(cls, input_spec: str):
+    def _get_input_file_list(cls: type, input_spec: str):
         if os.path.isdir(input_spec):
             file_list = [
                 os.path.join(input_spec, fname)
@@ -132,13 +143,13 @@ class DumpAction(InferenceAction):
     COMMAND: ClassVar[str] = "dump"
 
     @classmethod
-    def add_parser(cls, subparsers: argparse._SubParsersAction):
+    def add_parser(cls: type, subparsers: argparse._SubParsersAction):
         parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.")
         cls.add_arguments(parser)
         parser.set_defaults(func=cls.execute)
 
     @classmethod
-    def add_arguments(cls, parser: argparse.ArgumentParser):
+    def add_arguments(cls: type, parser: argparse.ArgumentParser):
         super(DumpAction, cls).add_arguments(parser)
         parser.add_argument(
             "--output",
@@ -149,7 +160,7 @@ class DumpAction(InferenceAction):
 
     @classmethod
     def execute_on_outputs(
-        cls, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
+        cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
     ):
         image_fpath = entry["file_name"]
         logger.info(f"Processing {image_fpath}")
@@ -167,12 +178,12 @@ class DumpAction(InferenceAction):
         context["results"].append(result)
 
     @classmethod
-    def create_context(cls, args: argparse.Namespace, cfg: CfgNode):
+    def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode):
         context = {"results": [], "out_fname": args.output}
         return context
 
     @classmethod
-    def postexecute(cls, context: Dict[str, Any]):
+    def postexecute(cls: type, context: Dict[str, Any]):
         out_fname = context["out_fname"]
         out_dir = os.path.dirname(out_fname)
         if len(out_dir) > 0 and not os.path.exists(out_dir):
@@ -201,13 +212,13 @@ class ShowAction(InferenceAction):
     }
 
     @classmethod
-    def add_parser(cls, subparsers: argparse._SubParsersAction):
+    def add_parser(cls: type, subparsers: argparse._SubParsersAction):
         parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
         cls.add_arguments(parser)
         parser.set_defaults(func=cls.execute)
 
     @classmethod
-    def add_arguments(cls, parser: argparse.ArgumentParser):
+    def add_arguments(cls: type, parser: argparse.ArgumentParser):
         super(ShowAction, cls).add_arguments(parser)
         parser.add_argument(
             "visualizations",
@@ -246,7 +257,7 @@ class ShowAction(InferenceAction):
 
     @classmethod
     def setup_config(
-        cls, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
+        cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
     ):
         opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST")
         opts.append(str(args.min_score))
@@ -258,21 +269,23 @@ class ShowAction(InferenceAction):
 
     @classmethod
     def execute_on_outputs(
-        cls, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
+        cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
     ):
         import cv2
         import numpy as np
-
         visualizer = context["visualizer"]
         extractor = context["extractor"]
-        image_fpath = entry["file_name"]
-        logger.info(f"Processing {image_fpath}")
+        # image_fpath = entry["file_name"]
+        # logger.info(f"Processing {image_fpath}")
         image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY)
         image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
         data = extractor(outputs)
         image_vis = visualizer.visualize(image, data)
+
+        return image_vis
         entry_idx = context["entry_idx"] + 1
-        out_fname = cls._get_out_fname(entry_idx, context["out_fname"])
+        out_fname = './image-densepose/' + image_fpath.split('/')[-1]
+        out_dir = './image-densepose'
         out_dir = os.path.dirname(out_fname)
         if len(out_dir) > 0 and not os.path.exists(out_dir):
             os.makedirs(out_dir)
@@ -281,16 +294,17 @@ class ShowAction(InferenceAction):
         context["entry_idx"] += 1
 
     @classmethod
-    def postexecute(cls, context: Dict[str, Any]):
+    def postexecute(cls: type, context: Dict[str, Any]):
         pass
+    # python ./apply_net.py show ./configs/densepose_rcnn_R_50_FPN_s1x.yaml https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl /home/alin0222/DressCode/upper_body/images dp_segm -v --opts MODEL.DEVICE cpu
 
     @classmethod
-    def _get_out_fname(cls, entry_idx: int, fname_base: str):
+    def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
         base, ext = os.path.splitext(fname_base)
         return base + ".{0:04d}".format(entry_idx) + ext
 
     @classmethod
-    def create_context(cls, args: argparse.Namespace, cfg: CfgNode) -> Dict[str, Any]:
+    def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode) -> Dict[str, Any]:
         vis_specs = args.visualizations.split(",")
         visualizers = []
         extractors = []
@@ -316,80 +330,6 @@ class ShowAction(InferenceAction):
         return context
 
 
-@register_action
-class GetAction(InferenceAction):
-    """
-    Get action that outputs results to a pickle file
-    """
-
-    COMMAND: ClassVar[str] = "get"
-
-    @classmethod
-    def add_parser(cls, subparsers: argparse._SubParsersAction):
-        parser = subparsers.add_parser(cls.COMMAND, help="Get model outputs to a file.")
-        cls.add_arguments(parser)
-        parser.set_defaults(func=cls.execute)
-
-    @classmethod
-    def add_arguments(cls, parser: argparse.ArgumentParser):
-        super(GetAction, cls).add_arguments(parser)
-        parser.add_argument(
-            "--output",
-            metavar="<dump_file>",
-            default="",
-            help="File name to save images to",
-        )
-
-    @classmethod
-    def execute_on_outputs(
-        cls, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
-    ):
-        image_fpath = entry["file_name"]
-        logger.info(f"Processing {image_fpath}")
-        result = {"file_name": image_fpath}
-        if outputs.has("scores"):
-            result["scores"] = outputs.get("scores").cpu()
-        if outputs.has("pred_boxes"):
-            result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
-        if outputs.has("pred_densepose"):
-            if isinstance(outputs.pred_densepose, DensePoseChartPredictorOutput):
-                extractor = DensePoseResultExtractor()
-            elif isinstance(outputs.pred_densepose, DensePoseEmbeddingPredictorOutput):
-                extractor = DensePoseOutputsExtractor()
-            result["pred_densepose"] = extractor(outputs)[0]
-
-        # decode
-        out_dir = context["out_fname"]
-        H, W, _ = entry['image'].shape
-
-        i = result['pred_densepose'][0].labels.cpu().numpy()
-        i_scale = (i.astype(np.float32) * 255 / 24).astype(np.uint8)
-        i_color = cv2.applyColorMap(i_scale, cv2.COLORMAP_PARULA)
-        i_color = cv2.cvtColor(i_color, cv2.COLOR_RGB2BGR)
-        i_color[i == 0] = [0, 0, 0]
-
-        box = result["pred_boxes_XYXY"][0]
-        box[2] = box[2] - box[0]
-        box[3] = box[3] - box[1]
-        x, y, w, h = [int(v) for v in box]
-
-        bg = np.zeros((H, W, 3))
-        bg[y:y + h, x:x + w, :] = i_color
-
-        bg_img = Image.fromarray(np.uint8(bg), "RGB")
-        os.makedirs(out_dir, exist_ok=True)
-        bg_img.save(os.path.join(out_dir, os.path.splitext(os.path.basename(result['file_name']))[0] + '.jpg'))
-
-    @classmethod
-    def create_context(cls, args: argparse.Namespace, cfg: CfgNode):
-        context = {"results": [], "out_fname": args.output}
-        return context
-
-    @classmethod
-    def postexecute(cls, context: Dict[str, Any]):
-        pass
-
-
 def create_argument_parser() -> argparse.ArgumentParser:
     parser = argparse.ArgumentParser(
         description=DOC,
@@ -405,7 +345,7 @@ def create_argument_parser() -> argparse.ArgumentParser:
 def main():
     parser = create_argument_parser()
     args = parser.parse_args()
-    verbosity = args.verbosity if hasattr(args, "verbosity") else None
+    verbosity = getattr(args, "verbosity", None)
     global logger
     logger = setup_logger(name=LOGGER_NAME)
     logger.setLevel(verbosity_to_level(verbosity))
@@ -414,3 +354,6 @@
 
 if __name__ == "__main__":
     main()
+
+
+# python ./apply_net.py show ./configs/densepose_rcnn_R_50_FPN_s1x.yaml https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl /home/alin0222/Dresscode/dresses/humanonly dp_segm -v --opts MODEL.DEVICE cuda
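The net effect on ShowAction is that execute_on_outputs now returns the rendered visualization instead of writing ./image-densepose/<name> to disk (everything after the new `return image_vis` is unreachable), and InferenceAction.execute consumes an already-loaded image rather than an `input` path argument. A condensed sketch of that flow, assuming a predictor, extractor and visualizer built the way setup_config and create_context do above:

# Sketch only: condenses the new ShowAction flow shown in the diff above.
import cv2
import numpy as np


def render_densepose(predictor, extractor, visualizer, bgr_image: np.ndarray) -> np.ndarray:
    # DefaultPredictor runs on a BGR array and returns per-image Instances.
    outputs = predictor(bgr_image)["instances"]
    # Draw on a 3-channel grayscale copy of the input, as execute_on_outputs does.
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    canvas = np.tile(gray[:, :, np.newaxis], [1, 1, 3])
    data = extractor(outputs)                  # e.g. a CompoundExtractor over the selected visualizations
    return visualizer.visualize(canvas, data)  # e.g. a CompoundVisualizer; result is returned, not saved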
densepose
DELETED
@@ -1 +0,0 @@
-preprocess/detectron2/projects/DensePose/densepose/
{preprocess/detectron2/projects/DensePose/densepose → densepose}/__init__.py
RENAMED
@@ -1,6 +1,4 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
-
-# pyre-unsafe
 from .data.datasets import builtin # just to register data
 from .converters import builtin as builtin_converters # register converters
 from .config import (
{preprocess/detectron2/projects/DensePose/densepose → densepose}/config.py
RENAMED
File without changes
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/__init__.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .hflip import HFlipConverter
 from .to_mask import ToMaskConverter
 from .to_chart_result import ToChartResultConverter, ToChartResultConverterWithConfidences
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/base.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any, Tuple, Type
 import torch
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/builtin.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from ..structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
 from . import (
     HFlipConverter,
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/chart_output_hflip.py
RENAMED
@@ -1,6 +1,4 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
-
-# pyre-unsafe
 from dataclasses import fields
 import torch
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/chart_output_to_chart_result.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Dict
 import torch
 from torch.nn import functional as F
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/hflip.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any
 
 from .base import BaseConverter
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/segm_to_mask.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any
 import torch
 from torch.nn import functional as F
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/to_chart_result.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any
 
 from detectron2.structures import Boxes
{preprocess/detectron2/projects/DensePose/densepose → densepose}/converters/to_mask.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any, Tuple
 
 from detectron2.structures import BitMasks, Boxes
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/__init__.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .meshes import builtin
 from .build import (
     build_detection_test_loader,
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/build.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import itertools
 import logging
 import numpy as np
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/combined_loader.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import random
 from collections import deque
 from typing import Any, Collection, Deque, Iterable, Iterator, List, Sequence
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/dataset_mapper.py
RENAMED
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import copy
 import logging
 from typing import Any, Dict, List, Tuple
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/__init__.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from . import builtin # ensure the builtin datasets are registered
 
 __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/builtin.py
RENAMED
@@ -1,6 +1,4 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
-
-# pyre-unsafe
 from .chimpnsee import register_dataset as register_chimpnsee_dataset
 from .coco import BASE_DATASETS as BASE_COCO_DATASETS
 from .coco import DATASETS as COCO_DATASETS
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/chimpnsee.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Optional
 
 from detectron2.data import DatasetCatalog, MetadataCatalog
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/coco.py
RENAMED
@@ -1,6 +1,4 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
-
-# pyre-unsafe
 import contextlib
 import io
 import logging
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/dataset_type.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from enum import Enum
 
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/datasets/lvis.py
RENAMED
@@ -1,6 +1,4 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
-
-# pyre-unsafe
 import logging
 import os
 from typing import Any, Dict, Iterable, List, Optional
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/image_list_dataset.py
RENAMED
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import logging
 import numpy as np
 from typing import Any, Callable, Dict, List, Optional, Union
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/inference_based_loader.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import random
 from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
 import torch
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/meshes/__init__.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 
-# pyre-unsafe
-
 from . import builtin
 
 __all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/meshes/builtin.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 
-# pyre-unsafe
-
 from .catalog import MeshInfo, register_meshes
 
 DENSEPOSE_MESHES_DIR = "https://dl.fbaipublicfiles.com/densepose/meshes/"
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/meshes/catalog.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 
-# pyre-unsafe
-
 import logging
 from collections import UserDict
 from dataclasses import dataclass
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/__init__.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .densepose_uniform import DensePoseUniformSampler
 from .densepose_confidence_based import DensePoseConfidenceBasedSampler
 from .densepose_cse_uniform import DensePoseCSEUniformSampler
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_base.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any, Dict, List, Tuple
 import torch
 from torch.nn import functional as F
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_confidence_based.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import random
 from typing import Optional, Tuple
 import torch
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_cse_base.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from typing import Any, Dict, List, Tuple
 import torch
 from torch.nn import functional as F
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_cse_confidence_based.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import random
 from typing import Optional, Tuple
 import torch
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_cse_uniform.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .densepose_cse_base import DensePoseCSEBaseSampler
 from .densepose_uniform import DensePoseUniformSampler
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/densepose_uniform.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import random
 import torch
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/mask_from_densepose.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from detectron2.structures import BitMasks, Instances
 
 from densepose.converters import ToMaskConverter
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/samplers/prediction_to_gt.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Optional
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/transform/__init__.py
RENAMED
@@ -1,5 +1,3 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .image import ImageResizeTransform
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/transform/image.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import torch
 
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/utils.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import os
 from typing import Dict, Optional
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/video/__init__.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .frame_selector import (
     FrameSelectionStrategy,
     RandomKFramesSelector,
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/video/frame_selector.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import random
 from collections.abc import Callable
 from enum import Enum
{preprocess/detectron2/projects/DensePose/densepose → densepose}/data/video/video_keyframe_dataset.py
RENAMED
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import csv
 import logging
 import numpy as np
{preprocess/detectron2/projects/DensePose/densepose → densepose}/engine/__init__.py
RENAMED
@@ -1,5 +1,3 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .trainer import Trainer
{preprocess/detectron2/projects/DensePose/densepose → densepose}/engine/trainer.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
 
-# pyre-unsafe
-
 import logging
 import os
 from collections import OrderedDict
{preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/__init__.py
RENAMED
@@ -1,5 +1,3 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from .evaluator import DensePoseCOCOEvaluator
{preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/d2_evaluator_adapter.py
RENAMED
@@ -1,7 +1,5 @@
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 from detectron2.data.catalog import Metadata
 from detectron2.evaluation import COCOEvaluator
 
{preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/densepose_coco_evaluation.py
RENAMED
@@ -5,8 +5,6 @@
 # LICENSE file in the root directory of this source tree.
 # This is a modified version of cocoeval.py where we also have the densepose evaluation.
 
-# pyre-unsafe
-
 __author__ = "tsungyi"
 
 import copy
@@ -18,7 +16,6 @@ import time
 from collections import defaultdict
 from enum import Enum
 from typing import Any, Dict, Tuple
-# pyre-fixme[21]: Could not find module `scipy.spatial.distance`.
 import scipy.spatial.distance as ssd
 import torch
 import torch.nn.functional as F
{preprocess/detectron2/projects/DensePose/densepose → densepose}/evaluation/evaluator.py
RENAMED
@@ -1,8 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright (c) Facebook, Inc. and its affiliates.
 
-# pyre-unsafe
-
 import contextlib
 import copy
 import io