mambazjp committed
Commit 8870024
1 Parent(s): 355b5d6

Upload 82 files

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +9 -0
  2. videoavatars/.github/ISSUE_TEMPLATE/new-issue.md +13 -0
  3. videoavatars/.gitignore +6 -0
  4. videoavatars/README.md +89 -0
  5. videoavatars/assets/b2m_f.npy +0 -0
  6. videoavatars/assets/b2m_m.npy +0 -0
  7. videoavatars/assets/basicModel_ft.npy +0 -0
  8. videoavatars/assets/basicModel_vt.npy +0 -0
  9. videoavatars/assets/bodyparts.pkl +0 -0
  10. videoavatars/assets/prior_a_pose.pkl +0 -0
  11. videoavatars/get_reconstructed_poses.py +56 -0
  12. videoavatars/get_reconstructed_poses.sh +4 -0
  13. videoavatars/lib/__init__.py +3 -0
  14. videoavatars/lib/__init__.pyc +0 -0
  15. videoavatars/lib/ch.py +34 -0
  16. videoavatars/lib/ch.pyc +0 -0
  17. videoavatars/lib/frame.py +48 -0
  18. videoavatars/lib/frame.pyc +0 -0
  19. videoavatars/lib/geometry.py +55 -0
  20. videoavatars/lib/geometry.pyc +0 -0
  21. videoavatars/lib/rays.py +99 -0
  22. videoavatars/lib/rays.pyc +0 -0
  23. videoavatars/models/__init__.py +2 -0
  24. videoavatars/models/__init__.pyc +0 -0
  25. videoavatars/models/bodyparts.py +76 -0
  26. videoavatars/models/bodyparts.pyc +0 -0
  27. videoavatars/models/smpl.py +209 -0
  28. videoavatars/models/smpl.pyc +0 -0
  29. videoavatars/prepare_data/2djoints2hdf5.py +38 -0
  30. videoavatars/prepare_data/create_camera.py +56 -0
  31. videoavatars/prepare_data/masks2hdf5.py +42 -0
  32. videoavatars/render/__init__.py +3 -0
  33. videoavatars/render/camera.py +38 -0
  34. videoavatars/render/renderer.py +188 -0
  35. videoavatars/requirements.txt +8 -0
  36. videoavatars/run_step1.sh +27 -0
  37. videoavatars/run_step2.sh +15 -0
  38. videoavatars/run_step3.sh +15 -0
  39. videoavatars/step1_pose.py +377 -0
  40. videoavatars/step2_consensus.py +204 -0
  41. videoavatars/step3_texture.py +200 -0
  42. videoavatars/tex/__init__.py +2 -0
  43. videoavatars/tex/iso.py +57 -0
  44. videoavatars/util/__init__.py +2 -0
  45. videoavatars/util/__init__.pyc +0 -0
  46. videoavatars/util/im.py +43 -0
  47. videoavatars/util/im.pyc +0 -0
  48. videoavatars/util/logger.py +12 -0
  49. videoavatars/util/logger.pyc +0 -0
  50. videoavatars/util/mesh.py +40 -0
.gitattributes CHANGED
@@ -5,3 +5,12 @@ RobustVideoMatting/documentation/image/showreel.gif filter=lfs diff=lfs merge=lfs -text
  RobustVideoMatting/documentation/image/teaser.gif filter=lfs diff=lfs merge=lfs -text
  RobustVideoMatting/rvm_mobilenetv3.pth filter=lfs diff=lfs merge=lfs -text
  pifuhd/checkpoints/pifuhd.pt filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smpl/models/basicmodel_f_lbs_10_207_0_v1.1.0.pkl filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.1.0.pkl filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smpl/models/basicmodel_neutral_lbs_10_207_0_v1.1.0.pkl filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smplify/models/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smplify/models/regressors_locked_normalized_female.npz filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smplify/models/regressors_locked_normalized_hybrid.npz filter=lfs diff=lfs merge=lfs -text
+ videoavatars/vendor/smplify/models/regressors_locked_normalized_male.npz filter=lfs diff=lfs merge=lfs -text
videoavatars/.github/ISSUE_TEMPLATE/new-issue.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ name: New issue
+ about: Default issue template.
+ title: ''
+ labels: ''
+ assignees: ''
+
+ ---
+
+ - [ ] I have read the paper **Video based reconstruction of 3D people models**.
+ - [ ] I have read the license.
+ - [ ] I understand that the authors are under no obligation to reply to comprehension questions regarding the paper or the provided code.
+ - [ ] The issue occurs in the provided code and not in third-party libraries (SMPL, chumpy, OpenDR, ...)
videoavatars/.gitignore ADDED
@@ -0,0 +1,6 @@
+ .idea/
+ .DS_Store
+
+ *.pyc
+ vendor/*
+ !vendor/__init__.py
videoavatars/README.md ADDED
@@ -0,0 +1,89 @@
+ # Video Avatars
+
+ This repository contains code corresponding to the paper **Video based reconstruction of 3D people models**.
+
+ ## Installation
+
+ Download and unpack the SMPL model from here: http://smpl.is.tue.mpg.de/.
+
+ Download and unpack the SMPLify code from here: http://smplify.is.tue.mpg.de/.
+
+ ```
+ git clone https://github.com/thmoa/videoavatars.git videoavatars
+ cd videoavatars/vendor
+ mkdir smpl
+ cd smpl
+ ln -s <path to smpl folder>/models .
+ ln -s <path to smpl folder>/smpl_webuser/*.py .
+ cd ..
+
+ mkdir smplify
+ cd smplify
+ touch __init__.py
+ ln -s <path to your smplify installation>/code/models .
+ # this file needs to be copied!
+ cp <path to your smplify installation>/code/lib/sphere_collisions.py .
+ # these files can be linked
+ ln -s <path to smplify folder>/code/lib/capsule_body.py .
+ ln -s <path to smplify folder>/code/lib/capsule_ch.py .
+ ln -s <path to smplify folder>/code/lib/robustifiers.py .
+ ```
+
+ Change line 14 in `vendor/smplify/sphere_collisions.py` to
+ ```
+ from vendor.smpl.lbs import verts_core
+ ```
+
+ ## Usage
+
+ The software consists of three parts:
+
+ 1. `step1_pose.py`: pose reconstruction
+ 2. `step2_consensus.py`: consensus shape optimization
+ 3. `step3_texture.py`: texture calculation
+
+ Starting the scripts without arguments displays usage information and the available options.
+ Additionally, we provide helper bash scripts for easier processing of our dataset.
+
+ The scripts in `prepare_data` may help you process your own data.
+
+ ## Citation
+
+ This repository contains code corresponding to:
+
+ T. Alldieck, M. Magnor, W. Xu, C. Theobalt, and G. Pons-Moll. **Video based reconstruction of 3D people models**. In *IEEE Conference on Computer Vision and Pattern Recognition*, 2018.
+
+ Please cite as:
+
+ ```
+ @inproceedings{alldieck2018video,
+   title = {Video Based Reconstruction of 3D People Models},
+   author = {Alldieck, Thiemo and Magnor, Marcus and Xu, Weipeng and Theobalt, Christian and Pons-Moll, Gerard},
+   booktitle = {{IEEE} Conference on Computer Vision and Pattern Recognition},
+   year = {2018}
+ }
+ ```
+
+ ## Dataset
+
+ The accompanying dataset can be downloaded here: https://graphics.tu-bs.de/people-snapshot
+
+ ## License
+
+ Copyright (c) 2018 Thiemo Alldieck, Technische Universität Braunschweig, Max-Planck-Gesellschaft
+
+ **Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use this software and associated documentation files (the "Software").**
+
+ The authors hereby grant you a non-exclusive, non-transferable, free of charge right to copy, modify, merge, publish, distribute, and sublicense the Software for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects.
+
+ Any other use, in particular any use for commercial purposes, is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artefacts for commercial purposes.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ You understand and agree that the authors are under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Software. The authors nevertheless reserve the right to update, modify, or discontinue the Software at any time.
+
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. You agree to cite the **Video Based Reconstruction of 3D People Models** paper in documents and papers that report on research using this Software.
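The three steps write their results next to the subject data. As a quick orientation, here is a minimal sketch of how the step-1 output can be inspected (assumes `h5py`; the path is an example, and the dataset names `pose`, `trans`, `betas` are the ones created by `step1_pose.py`):

```python
# Minimal sketch: inspect the step-1 output (file path is an example).
import h5py

with h5py.File('subject/reconstructed_poses.hdf5', 'r') as f:
    poses = f['pose'][()]    # (num_frames, 72) per-frame SMPL pose parameters
    trans = f['trans'][()]   # (num_frames, 3) per-frame root translations
    betas = f['betas'][()]   # (10,) shape coefficients shared by all frames
print(poses.shape, trans.shape, betas.shape)
```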
videoavatars/assets/b2m_f.npy ADDED
Binary file (216 Bytes).

videoavatars/assets/b2m_m.npy ADDED
Binary file (216 Bytes).

videoavatars/assets/basicModel_ft.npy ADDED
Binary file (331 kB).

videoavatars/assets/basicModel_vt.npy ADDED
Binary file (121 kB).

videoavatars/assets/bodyparts.pkl ADDED
Binary file (36.1 kB).

videoavatars/assets/prior_a_pose.pkl ADDED
Binary file (38.8 kB).
 
videoavatars/get_reconstructed_poses.py ADDED
@@ -0,0 +1,56 @@
+ '''
+ @Author: Jiapeng Zhou
+ @Date: 2023-05-26 15:55:30
+ @Description: Produces the SMPL parameter file "reconstructed_poses.hdf5" from videoavatars.
+ Usage: conda activate videoavatar && python get_reconstructed_poses.py --root ~/Deexxx/xinyu_a --out videoavatars --gender xxx
+ '''
+ import argparse
+ import os, os.path as osp, sys
+ import pdb
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--root', type=str)
+     parser.add_argument('--out', type=str, default='videoavatars')
+     parser.add_argument('--gender', type=str, choices=['male', 'female'],
+                         help='"male" or "female"; run_step1.sh needs it to select the SMPL model of the matching gender')
+     args = parser.parse_args()
+     return args
+
+
+ def main():
+     args = parse_args()
+     output_dir = osp.join(args.root, args.out)
+     # os.makedirs(output_dir, exist_ok=True)
+
+     # openpose => keypoints.hdf5
+     output_pth = osp.join(output_dir, 'keypoints.hdf5')
+     if not osp.isfile(output_pth):
+         os.system('python ./prepare_data/2djoints2hdf5.py {} {}'.format(
+             osp.join(args.root, 'openpose'), output_pth))
+
+     # masks => masks.hdf5
+     output_pth = osp.join(output_dir, 'masks.hdf5')
+     if not osp.isfile(output_pth):
+         os.system('python ./prepare_data/masks2hdf5.py {} {}'.format(
+             osp.join(args.root, 'masks'), output_pth))
+
+     # camera => camera.pkl
+     # TODO: f and c should come from the real camera intrinsics.
+     # Conversion for the rotated (portrait) frame: fx2 = fy, fy2 = fx, cx2 = 1080 - cy, cy2 = cx
+     output_pth = osp.join(output_dir, 'camera.pkl')
+     if not osp.isfile(output_pth):
+         # os.system('python ./prepare_data/create_camera.py '
+         #           '{} 1080 1920 -f 914.87 915.067 -c 540.63 958.13'.format(output_pth))
+         os.system('python ./prepare_data/create_camera.py {} 1080 1920'.format(output_pth))
+
+     # get reconstructed_poses.hdf5
+     # TODO: the SMPL model needs to be selected according to gender
+     os.system('bash run_step1.sh {} {} {}'.format(output_dir, output_dir, args.gender))
+     return
+
+
+ if __name__ == '__main__':
+     main()
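The TODO and the inline comment above refer to remapping the intrinsics of a landscape camera into the rotated portrait frame. Written out, the comment's formula looks like the sketch below; this is only an illustration of that comment, and the numeric values are placeholders, not calibration data:

```python
# Sketch of the intrinsics remapping quoted in the comment above
# (landscape 1920x1080 camera rotated into a 1080x1920 portrait frame).
def rotate_intrinsics(fx, fy, cx, cy, height=1080):
    fx2, fy2 = fy, fx
    cx2, cy2 = height - cy, cx
    return fx2, fy2, cx2, cy2

# Placeholder values, for illustration only:
print(rotate_intrinsics(fx=900.0, fy=900.0, cx=960.0, cy=540.0))
```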
videoavatars/get_reconstructed_poses.sh ADDED
@@ -0,0 +1,4 @@
+ # If this script is called from python via os.system('bash xxx.sh'), `conda activate` fails with a CommandNotFoundError
+ conda activate videoavatar
+ python get_reconstructed_poses.py --root $1 --out $2 --gender $3
videoavatars/lib/__init__.py ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/env python2
+ # -*- coding: utf-8 -*-
+
videoavatars/lib/__init__.pyc ADDED
Binary file (162 Bytes).
 
videoavatars/lib/ch.py ADDED
@@ -0,0 +1,34 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import numpy as np
5
+ import chumpy as ch
6
+ import scipy.sparse as sp
7
+
8
+ from chumpy.utils import col
9
+
10
+
11
+ class sp_dot(ch.Ch):
12
+ terms = 'a',
13
+ dterms = 'b',
14
+
15
+ def compute_r(self):
16
+ return self.a.dot(self.b.r)
17
+
18
+ def compute(self):
19
+
20
+ # To stay consistent with numpy, we must upgrade 1D arrays to 2D
21
+ ar = sp.csr_matrix((self.a.data, self.a.indices, self.a.indptr),
22
+ shape=(max(np.sum(self.a.shape[:-1]), 1), self.a.shape[-1]))
23
+ br = col(self.b.r) if len(self.b.r.shape) < 2 else self.b.r.reshape((self.b.r.shape[0], -1))
24
+
25
+ if br.ndim <= 1:
26
+ return ar
27
+ elif br.ndim <= 2:
28
+ return sp.kron(ar, sp.eye(br.shape[1], br.shape[1]))
29
+ else:
30
+ raise NotImplementedError
31
+
32
+ def compute_dr_wrt(self, wrt):
33
+ if wrt is self.b:
34
+ return self.compute()
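A minimal usage sketch for `sp_dot`: it keeps a fixed sparse matrix in the term `a` and differentiates only with respect to the chumpy variable `b`. This assumes the repository's chumpy/scipy environment and is illustrative, not part of the original code:

```python
# Usage sketch for sp_dot: y = A.dot(x) with A fixed (sparse) and x differentiable.
import numpy as np
import scipy.sparse as sp
import chumpy as ch
from lib.ch import sp_dot

A = sp.csr_matrix(np.array([[1., 0., 2.],
                            [0., 3., 0.]]))   # fixed sparse matrix (term 'a')
x = ch.array([1., 1., 1.])                    # differentiable vector (dterm 'b')

y = sp_dot(a=A, b=x)
print(y.r)   # value of A.dot(x.r) -> [3., 3.]; derivatives come from compute_dr_wrt
```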
videoavatars/lib/ch.pyc ADDED
Binary file (1.71 kB).
 
videoavatars/lib/frame.py ADDED
@@ -0,0 +1,48 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import numpy as np
5
+
6
+ from util.logger import log
7
+ from lib.rays import rays_from_silh
8
+ from models.smpl import model_params_in_camera_coords
9
+
10
+ sess = None
11
+
12
+
13
+ class FrameData(object):
14
+ pass
15
+
16
+
17
+ def batch_invert(x):
18
+ try:
19
+ import tensorflow as tf
20
+ global sess
21
+
22
+ tx = tf.convert_to_tensor(x, dtype=tf.float32)
23
+ txi = tf.transpose(tf.matrix_inverse(tf.transpose(tx)))
24
+
25
+ if sess is None:
26
+ sess = tf.Session()
27
+
28
+ return sess.run(txi)
29
+
30
+ except ImportError:
31
+ log.info('Could not load tensorflow. Falling back to matrix inversion with numpy (slower).')
32
+
33
+ return np.asarray([np.linalg.inv(t) for t in x.T]).T
34
+
35
+
36
+ def setup_frame_rays(base_smpl, camera, camera_t, camera_rt, pose, trans, mask):
37
+ f = FrameData()
38
+
39
+ f.trans, f.pose = model_params_in_camera_coords(trans, pose, base_smpl.J[0], camera_t, camera_rt)
40
+ f.mask = mask
41
+
42
+ base_smpl.pose[:] = f.pose
43
+ camera.t[:] = f.trans
44
+
45
+ f.Vi = batch_invert(base_smpl.V.r)
46
+ f.rays = rays_from_silh(f.mask, camera)
47
+
48
+ return f
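`batch_invert` expects a stack of 4x4 matrices laid out as `(4, 4, N)`, which is how it is called on `base_smpl.V.r` above. A small self-contained check of the numpy fallback, using made-up matrices:

```python
# Sketch: the numpy fallback of batch_invert inverts each (4, 4) slice of a (4, 4, N) stack.
import numpy as np

x = np.random.randn(4, 4, 3) + 4. * np.eye(4)[:, :, None]   # 3 well-conditioned 4x4 matrices
xi = np.asarray([np.linalg.inv(t) for t in x.T]).T           # same expression as in frame.py

for k in range(x.shape[2]):
    assert np.allclose(x[:, :, k].dot(xi[:, :, k]), np.eye(4))
print('ok')
```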
videoavatars/lib/frame.pyc ADDED
Binary file (1.83 kB).
 
videoavatars/lib/geometry.py ADDED
@@ -0,0 +1,55 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import cv2
5
+ import numpy as np
6
+ from scipy import sparse as sp
7
+
8
+
9
+ def visible_boundary_edges(rn_b, rn_m):
10
+ visibility = rn_b.boundaryid_image
11
+
12
+ silh = rn_m.r
13
+ sobelx = cv2.Sobel(silh, cv2.CV_64F, 1, 0, ksize=3)
14
+ sobely = cv2.Sobel(silh, cv2.CV_64F, 0, 1, ksize=3)
15
+
16
+ mag = (sobelx ** 2 + sobely ** 2) > 0
17
+
18
+ visibility[mag == 0] = 4294967295
19
+ visible = np.nonzero(visibility.ravel() != 4294967295)[0]
20
+
21
+ return np.unique(visibility.ravel()[visible])
22
+
23
+
24
+ def visible_boundary_edge_verts(rn_b, rn_m):
25
+ visible_edge_ids = visible_boundary_edges(rn_b, rn_m)
26
+
27
+ vpe = rn_b.primitives_per_edge[1]
28
+ verts = np.unique(vpe[visible_edge_ids].ravel())
29
+
30
+ return verts
31
+
32
+
33
+ def laplacian(v, f):
34
+ n = len(v)
35
+
36
+ v_a = f[:, 0]
37
+ v_b = f[:, 1]
38
+ v_c = f[:, 2]
39
+
40
+ ab = v[v_a] - v[v_b]
41
+ bc = v[v_b] - v[v_c]
42
+ ca = v[v_c] - v[v_a]
43
+
44
+ cot_a = -1 * (ab * ca).sum(axis=1) / np.sqrt(np.sum(np.cross(ab, ca) ** 2, axis=-1))
45
+ cot_b = -1 * (bc * ab).sum(axis=1) / np.sqrt(np.sum(np.cross(bc, ab) ** 2, axis=-1))
46
+ cot_c = -1 * (ca * bc).sum(axis=1) / np.sqrt(np.sum(np.cross(ca, bc) ** 2, axis=-1))
47
+
48
+ I = np.concatenate((v_a, v_c, v_a, v_b, v_b, v_c))
49
+ J = np.concatenate((v_c, v_a, v_b, v_a, v_c, v_b))
50
+ W = 0.5 * np.concatenate((cot_b, cot_b, cot_c, cot_c, cot_a, cot_a))
51
+
52
+ L = sp.csr_matrix((W, (I, J)), shape=(n, n))
53
+ L = L - sp.spdiags(L * np.ones(n), 0, n, n)
54
+
55
+ return L
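`laplacian` assembles the cotangent-weighted Laplace matrix that step 2 imports for surface regularization. By construction (the diagonal is the negative row sum of the weights), each row sums to zero, which the following small sketch checks on a toy tetrahedron:

```python
# Sanity-check sketch for laplacian(): constant functions lie in its null space.
import numpy as np
from lib.geometry import laplacian

v = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
f = np.array([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])

L = laplacian(v, f)
print(np.abs(L.dot(np.ones(len(v)))).max())   # ~0
```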
videoavatars/lib/geometry.pyc ADDED
Binary file (2.2 kB).
 
videoavatars/lib/rays.py ADDED
@@ -0,0 +1,99 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import cv2
5
+ import numpy as np
6
+ import chumpy as ch
7
+
8
+ from lib.geometry import visible_boundary_edge_verts
9
+ from vendor.smplify.robustifiers import GMOf
10
+
11
+
12
+ def plucker(rays):
13
+ p = rays[:, 0]
14
+ n = rays[:, 1] - rays[:, 0]
15
+ n /= np.linalg.norm(n, axis=1).reshape(-1, 1)
16
+ m = np.cross(p, n, axisa=1, axisb=1)
17
+
18
+ return n, m
19
+
20
+
21
+ def distance_function(rays, verts):
22
+ n, m = plucker(rays)
23
+ return ch.cross(verts, n, axisa=1, axisb=1) - m
24
+
25
+
26
+ def unpose_and_select_rays(rays, Vi, smpl, rn_b, rn_m):
27
+ v_ids = visible_boundary_edge_verts(rn_b, rn_m)
28
+ verts = smpl.r[v_ids]
29
+
30
+ n, m = plucker(rays)
31
+ dist = np.linalg.norm(np.cross(verts.reshape(-1, 1, 3), n, axisa=2, axisb=1) - m, axis=2)
32
+
33
+ ray_matches = np.argmin(dist, axis=0)
34
+ vert_matches = np.argmin(dist, axis=1)
35
+
36
+ rays_u_r = np.zeros_like(rays)
37
+
38
+ M = Vi[:, :, v_ids]
39
+ T = smpl.v_posevariation[v_ids].r
40
+
41
+ tmp0 = M[:, :, ray_matches] * np.hstack((rays[:, 0], np.ones((rays.shape[0], 1)))).T.reshape(1, 4, -1)
42
+ tmp1 = M[:, :, ray_matches] * np.hstack((rays[:, 1], np.ones((rays.shape[0], 1)))).T.reshape(1, 4, -1)
43
+
44
+ rays_u_r[:, 0] = np.sum(tmp0, axis=1).T[:, :3] - T[ray_matches]
45
+ rays_u_r[:, 1] = np.sum(tmp1, axis=1).T[:, :3] - T[ray_matches]
46
+
47
+ rays_u_v = np.zeros_like(rays[vert_matches])
48
+
49
+ tmp0 = M * np.hstack((rays[vert_matches, 0], np.ones((verts.shape[0], 1)))).T.reshape(1, 4, -1)
50
+ tmp1 = M * np.hstack((rays[vert_matches, 1], np.ones((verts.shape[0], 1)))).T.reshape(1, 4, -1)
51
+
52
+ rays_u_v[:, 0] = np.sum(tmp0, axis=1).T[:, :3] - T
53
+ rays_u_v[:, 1] = np.sum(tmp1, axis=1).T[:, :3] - T
54
+
55
+ valid_rays = dist[np.vstack((ray_matches, range(dist.shape[1]))).tolist()] < 0.12
56
+ valid_verts = dist[np.vstack((range(dist.shape[0]), vert_matches)).tolist()] < 0.03
57
+
58
+ ray_matches = ray_matches[valid_rays]
59
+
60
+ return np.concatenate((v_ids[ray_matches], v_ids[valid_verts])), \
61
+ np.concatenate((rays_u_r[valid_rays], rays_u_v[valid_verts]))
62
+
63
+
64
+ def rays_from_points(points, camera):
65
+ points = np.hstack((points, np.ones((points.shape[0], 1))))
66
+ points3d = camera.unproject_points(points)
67
+
68
+ c0 = -camera.t.r
69
+
70
+ return np.hstack((np.repeat(c0.reshape(1, 1, 3), points3d.shape[0], axis=0), points3d.reshape(-1, 1, 3)))
71
+
72
+
73
+ def rays_from_silh(mask, camera):
74
+
75
+ if cv2.__version__[0] == '2':
76
+ contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
77
+ else:
78
+ _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
79
+
80
+ silh = np.zeros_like(mask)
81
+
82
+ for con in contours:
83
+ cv2.drawContours(silh, [con], 0, 1, 1)
84
+
85
+ points = np.vstack(np.where(silh == 1)[::-1]).astype(np.float32).T
86
+ rays = rays_from_points(points, camera)
87
+
88
+ return rays
89
+
90
+
91
+ def ray_objective(f, sigma, base_smpl, camera, vis_rn_b, vis_rn_m):
92
+ base_smpl.pose[:] = f.pose
93
+ camera.t[:] = f.trans
94
+
95
+ f.v_ids, f.rays_u = unpose_and_select_rays(f.rays, f.Vi, base_smpl, vis_rn_b, vis_rn_m)
96
+ f.verts = base_smpl.v_shaped_personal[f.v_ids]
97
+ f.dist = distance_function(f.rays_u, f.verts)
98
+
99
+ return GMOf(f.dist, sigma)
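The ray terms above use Plücker coordinates: a line is stored as a unit direction `n` and a moment `m = p x n`, and `||v x n - m||` is then the distance of a point `v` to that line. A self-contained numeric check (the `plucker` helper is repeated here so the snippet runs without the `vendor/` setup that `lib.rays` imports):

```python
import numpy as np

def plucker(rays):                       # same computation as in lib/rays.py
    p = rays[:, 0]
    n = rays[:, 1] - rays[:, 0]
    n /= np.linalg.norm(n, axis=1).reshape(-1, 1)
    m = np.cross(p, n, axisa=1, axisb=1)
    return n, m

rays = np.array([[[0., 0., 0.], [1., 0., 0.]]])    # one ray along the x-axis
n, m = plucker(rays)
v = np.array([[2., 3., 0.]])                       # a point 3 units from that line
print(np.linalg.norm(np.cross(v, n) - m, axis=1))  # -> [3.]
```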
videoavatars/lib/rays.pyc ADDED
Binary file (4.37 kB).
 
videoavatars/models/__init__.py ADDED
@@ -0,0 +1,2 @@
+ #!/usr/bin/env python2
+ # -*- coding: utf-8 -*-
videoavatars/models/__init__.pyc ADDED
Binary file (165 Bytes).
 
videoavatars/models/bodyparts.py ADDED
@@ -0,0 +1,76 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import cPickle as pkl
5
+ import numpy as np
6
+
7
+ _cache = None
8
+
9
+
10
+ def get_bodypart_vertex_ids():
11
+ global _cache
12
+
13
+ if _cache is None:
14
+ with open('assets/bodyparts.pkl', 'rb') as fp:
15
+ _cache = pkl.load(fp)
16
+
17
+ return _cache
18
+
19
+
20
+ def faces_no_hands(f):
21
+ v_ids = get_bodypart_vertex_ids()
22
+ hands = np.concatenate((v_ids['hand_r'], v_ids['hand_l']))
23
+ return np.array(filter(lambda face: np.intersect1d(face, hands).size == 0, f))
24
+
25
+
26
+ def regularize_laplace():
27
+ reg = np.ones(6890)
28
+ v_ids = get_bodypart_vertex_ids()
29
+
30
+ reg[v_ids['face']] = 12.
31
+ reg[v_ids['hand_l']] = 5.
32
+ reg[v_ids['hand_r']] = 5.
33
+ reg[v_ids['fingers_l']] = 8.
34
+ reg[v_ids['fingers_r']] = 8.
35
+ reg[v_ids['foot_l']] = 5.
36
+ reg[v_ids['foot_r']] = 5.
37
+ reg[v_ids['toes_l']] = 8.
38
+ reg[v_ids['toes_r']] = 8.
39
+ reg[v_ids['ear_l']] = 10.
40
+ reg[v_ids['ear_r']] = 10.
41
+
42
+ return reg
43
+
44
+
45
+ def regularize_model():
46
+ reg = np.ones(6890)
47
+ v_ids = get_bodypart_vertex_ids()
48
+
49
+ reg[v_ids['face']] = 7.
50
+ reg[v_ids['hand_l']] = 12.
51
+ reg[v_ids['hand_r']] = 12.
52
+ reg[v_ids['fingers_l']] = 15.
53
+ reg[v_ids['fingers_r']] = 15.
54
+ reg[v_ids['foot_l']] = 12.
55
+ reg[v_ids['foot_r']] = 12.
56
+ reg[v_ids['toes_l']] = 15.
57
+ reg[v_ids['toes_r']] = 15.
58
+ reg[v_ids['ear_l']] = 10.
59
+ reg[v_ids['ear_r']] = 10.
60
+
61
+ return reg
62
+
63
+
64
+ def regularize_symmetry():
65
+ reg = np.ones(6890)
66
+ v_ids = get_bodypart_vertex_ids()
67
+
68
+ reg[v_ids['face']] = 10.
69
+ reg[v_ids['hand_l']] = 10.
70
+ reg[v_ids['hand_r']] = 10.
71
+ reg[v_ids['foot_l']] = 10.
72
+ reg[v_ids['foot_r']] = 10.
73
+ reg[v_ids['ear_l']] = 5.
74
+ reg[v_ids['ear_r']] = 5.
75
+
76
+ return reg
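The `regularize_*` helpers return one weight per SMPL vertex (6890 in total); body parts that should stay close to the underlying model (face, fingers, toes, ears) receive larger weights, and step 2 consumes these arrays. A small usage sketch (assumes the repository's Python 2 environment, since this module uses `cPickle`, and that `assets/bodyparts.pkl` is present):

```python
# Sketch: per-vertex regularization weights, as consumed by step2_consensus.py.
from models.bodyparts import get_bodypart_vertex_ids, regularize_laplace

w = regularize_laplace()
v_ids = get_bodypart_vertex_ids()
print(w.shape)                 # (6890,)
print(w[v_ids['face']][:5])    # face vertices carry weight 12.0
```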
videoavatars/models/bodyparts.pyc ADDED
Binary file (2.65 kB).
 
videoavatars/models/smpl.py ADDED
@@ -0,0 +1,209 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import chumpy as ch
5
+ import numpy as np
6
+ import cPickle as pkl
7
+ import scipy.sparse as sp
8
+ from chumpy.ch import Ch
9
+ from vendor.smpl.posemapper import posemap, Rodrigues
10
+ from vendor.smpl.serialization import backwards_compatibility_replacements
11
+
12
+
13
+ VERT_NOSE = 331
14
+ VERT_EAR_L = 3485
15
+ VERT_EAR_R = 6880
16
+ VERT_EYE_L = 2802
17
+ VERT_EYE_R = 6262
18
+
19
+
20
+ class Smpl(Ch):
21
+ """
22
+ Class to store SMPL object with slightly improved code and access to more matrices
23
+ """
24
+ terms = 'model',
25
+ dterms = 'trans', 'betas', 'pose', 'v_personal'
26
+
27
+ def __init__(self, *args, **kwargs):
28
+ self.on_changed(self._dirty_vars)
29
+
30
+ def on_changed(self, which):
31
+ if not hasattr(self, 'trans'):
32
+ self.trans = ch.zeros(3)
33
+
34
+ if not hasattr(self, 'betas'):
35
+ self.betas = ch.zeros(10)
36
+
37
+ if not hasattr(self, 'pose'):
38
+ self.pose = ch.zeros(72)
39
+
40
+ if 'model' in which:
41
+ if not isinstance(self.model, dict):
42
+ dd = pkl.load(open(self.model))
43
+ else:
44
+ dd = self.model
45
+
46
+ backwards_compatibility_replacements(dd)
47
+
48
+ for s in ['posedirs', 'shapedirs']:
49
+ if (s in dd) and not hasattr(dd[s], 'dterms'):
50
+ dd[s] = ch.array(dd[s])
51
+
52
+ self.f = dd['f']
53
+ self.v_template = dd['v_template']
54
+ if not hasattr(self, 'v_personal'):
55
+ self.v_personal = ch.zeros_like(self.v_template)
56
+ self.shapedirs = dd['shapedirs']
57
+ self.J_regressor = dd['J_regressor']
58
+ if 'J_regressor_prior' in dd:
59
+ self.J_regressor_prior = dd['J_regressor_prior']
60
+ if sp.issparse(self.J_regressor):
61
+ self.J_regressor = self.J_regressor.toarray()
62
+ self.bs_type = dd['bs_type']
63
+ self.weights = dd['weights']
64
+ if 'vert_sym_idxs' in dd:
65
+ self.vert_sym_idxs = dd['vert_sym_idxs']
66
+ if 'weights_prior' in dd:
67
+ self.weights_prior = dd['weights_prior']
68
+ self.kintree_table = dd['kintree_table']
69
+ self.posedirs = dd['posedirs']
70
+
71
+ self._set_up()
72
+
73
+ def _set_up(self):
74
+ self.v_shaped = self.shapedirs.dot(self.betas) + self.v_template
75
+ self.v_shaped_personal = self.v_shaped + self.v_personal
76
+ self.J = ch.sum(self.J_regressor.T.reshape(-1, 1, 24) * self.v_shaped.reshape(-1, 3, 1), axis=0).T
77
+ self.v_posevariation = self.posedirs.dot(posemap(self.bs_type)(self.pose))
78
+ self.v_poseshaped = self.v_shaped_personal + self.v_posevariation
79
+
80
+ self.A, A_global = self._global_rigid_transformation()
81
+ self.Jtr = ch.vstack([g[:3, 3] for g in A_global])
82
+ self.J_transformed = self.Jtr + self.trans.reshape((1, 3))
83
+
84
+ self.V = self.A.dot(self.weights.T)
85
+
86
+ rest_shape_h = ch.hstack((self.v_poseshaped, ch.ones((self.v_poseshaped.shape[0], 1))))
87
+ self.v_posed = ch.sum(self.V.T * rest_shape_h.reshape(-1, 4, 1), axis=1)[:, :3]
88
+ self.v = self.v_posed + self.trans
89
+
90
+ def _global_rigid_transformation(self):
91
+ results = {}
92
+ pose = self.pose.reshape((-1, 3))
93
+ parent = {i: self.kintree_table[0, i] for i in range(1, self.kintree_table.shape[1])}
94
+
95
+ with_zeros = lambda x: ch.vstack((x, ch.array([[0.0, 0.0, 0.0, 1.0]])))
96
+ pack = lambda x: ch.hstack([ch.zeros((4, 3)), x.reshape((4, 1))])
97
+
98
+ results[0] = with_zeros(ch.hstack((Rodrigues(pose[0, :]), self.J[0, :].reshape((3, 1)))))
99
+
100
+ for i in range(1, self.kintree_table.shape[1]):
101
+ results[i] = results[parent[i]].dot(with_zeros(ch.hstack((
102
+ Rodrigues(pose[i, :]), # rotation around bone endpoint
103
+ (self.J[i, :] - self.J[parent[i], :]).reshape((3, 1)) # bone
104
+ ))))
105
+
106
+ results = [results[i] for i in sorted(results.keys())]
107
+ results_global = results
108
+
109
+ # subtract rotated J position
110
+ results2 = [results[i] - (pack(
111
+ results[i].dot(ch.concatenate((self.J[i, :], [0]))))
112
+ ) for i in range(len(results))]
113
+ result = ch.dstack(results2)
114
+
115
+ return result, results_global
116
+
117
+ def compute_r(self):
118
+ return self.v.r
119
+
120
+ def compute_dr_wrt(self, wrt):
121
+ if wrt is not self.trans and wrt is not self.betas and wrt is not self.pose and wrt is not self.v_personal:
122
+ return None
123
+
124
+ return self.v.dr_wrt(wrt)
125
+
126
+
127
+ def copy_smpl(smpl, model):
128
+ new = Smpl(model, betas=smpl.betas)
129
+ new.pose[:] = smpl.pose.r
130
+ new.trans[:] = smpl.trans.r
131
+
132
+ return new
133
+
134
+
135
+ def joints_coco(smpl):
136
+ J = smpl.J_transformed
137
+ nose = smpl[VERT_NOSE]
138
+ ear_l = smpl[VERT_EAR_L]
139
+ ear_r = smpl[VERT_EAR_R]
140
+ eye_l = smpl[VERT_EYE_L]
141
+ eye_r = smpl[VERT_EYE_R]
142
+
143
+ shoulders_m = ch.sum(J[[14, 13]], axis=0) / 2.
144
+ neck = J[12] - 0.55 * (J[12] - shoulders_m)
145
+
146
+ return ch.vstack((
147
+ nose,
148
+ neck,
149
+ 2.1 * (J[14] - shoulders_m) + neck,
150
+ J[[19, 21]],
151
+ 2.1 * (J[13] - shoulders_m) + neck,
152
+ J[[18, 20]],
153
+ J[2] + 0.38 * (J[2] - J[1]),
154
+ J[[5, 8]],
155
+ J[1] + 0.38 * (J[1] - J[2]),
156
+ J[[4, 7]],
157
+ eye_r,
158
+ eye_l,
159
+ ear_r,
160
+ ear_l,
161
+ ))
162
+
163
+
164
+ def model_params_in_camera_coords(trans, pose, J0, camera_t, camera_rt):
165
+ root = Rodrigues(np.matmul(Rodrigues(camera_rt).r, Rodrigues(pose[:3]).r)).r.reshape(-1)
166
+ pose[:3] = root
167
+
168
+ trans = (Rodrigues(camera_rt).dot(J0 + trans) - J0 + camera_t).r
169
+
170
+ return trans, pose
171
+
172
+
173
+ if __name__ == '__main__':
174
+ smpl = Smpl(model='../vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
175
+ smpl.pose[:] = np.random.randn(72) * .2
176
+ smpl.pose[0] = np.pi
177
+ # smpl.v_personal[:] = np.random.randn(*smpl.shape) / 500.
178
+
179
+ # render test
180
+ from opendr.renderer import ColoredRenderer
181
+ from opendr.camera import ProjectPoints
182
+ from opendr.lighting import LambertianPointLight
183
+
184
+ rn = ColoredRenderer()
185
+
186
+ # Assign attributes to renderer
187
+ w, h = (640, 480)
188
+
189
+ rn.camera = ProjectPoints(v=smpl, rt=np.zeros(3), t=np.array([0, 0, 3.]), f=np.array([w, w]),
190
+ c=np.array([w, h]) / 2., k=np.zeros(5))
191
+ rn.frustum = {'near': 1., 'far': 10., 'width': w, 'height': h}
192
+ rn.set(v=smpl, f=smpl.f, bgcolor=np.zeros(3))
193
+
194
+ # Construct point light source
195
+ rn.vc = LambertianPointLight(
196
+ f=smpl.f,
197
+ v=rn.v,
198
+ num_verts=len(smpl),
199
+ light_pos=np.array([-1000, -1000, -2000]),
200
+ vc=np.ones_like(smpl) * .9,
201
+ light_color=np.array([1., 1., 1.]))
202
+
203
+ # Show it using OpenCV
204
+ import cv2
205
+
206
+ cv2.imshow('render_SMPL', rn.r)
207
+ print ('..Print any key while on the display window')
208
+ cv2.waitKey(0)
209
+ cv2.destroyAllWindows()
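Beyond the render test in the `__main__` block above, the `Smpl` wrapper can be used directly. A minimal sketch (assumes the `vendor/smpl` setup from the README and a downloaded SMPL model; the path below is only an example):

```python
import numpy as np
from models.smpl import Smpl, joints_coco

smpl = Smpl(model='vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')
smpl.betas[:] = np.zeros(10)      # shape coefficients
smpl.pose[:] = np.zeros(72)       # axis-angle pose (24 joints x 3)
smpl.trans[:] = [0., 0., 3.]      # root translation

print(smpl.r.shape)                  # (6890, 3) posed vertices
print(smpl.J_transformed.r.shape)    # (24, 3) posed joint locations
print(joints_coco(smpl).r.shape)     # (18, 3) COCO-style keypoints used in step 1
```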
videoavatars/models/smpl.pyc ADDED
Binary file (8.57 kB).
 
videoavatars/prepare_data/2djoints2hdf5.py ADDED
@@ -0,0 +1,38 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import os
5
+ import argparse
6
+ import h5py
7
+ import json
8
+ import numpy as np
9
+
10
+ from glob import glob
11
+ from tqdm import tqdm
12
+ import pdb
13
+ """
14
+
15
+ This script stores OpenPose 2D keypoints from json files in the given directory in a compressed hdf5 file.
16
+
17
+ Example:
18
+ $ python 2djoints2hdf5.py dataset/subject/openpose_detections keypoints.hdf5
19
+
20
+ """
21
+
22
+
23
+ parser = argparse.ArgumentParser()
24
+ parser.add_argument('src_folder', type=str)
25
+ parser.add_argument('target', type=str)
26
+
27
+ args = parser.parse_args()
28
+
29
+ out_file = args.target
30
+ pose_dir = args.src_folder
31
+ pose_files = sorted(glob(os.path.join(pose_dir, '*.json')))
32
+
33
+ with h5py.File(out_file, 'w') as f:
34
+ poses_dset = f.create_dataset("keypoints", (len(pose_files), 54), 'f', chunks=True, compression="lzf") # 54
35
+ for i, pose_file in enumerate(tqdm(pose_files)):
36
+ with open(pose_file) as fp:
37
+ pose = np.array(json.load(fp)['people'][0]['pose_keypoints_2d'])
38
+ poses_dset[i] = pose
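Each row written above holds 54 values, i.e. 18 COCO-style OpenPose joints times (x, y, confidence); `step1_pose.py` later reshapes them back with `reshape(-1, 3)`. A small read-back sketch (file name as produced above):

```python
import h5py
import numpy as np

with h5py.File('keypoints.hdf5', 'r') as f:
    kp = np.array(f['keypoints'][0]).reshape(-1, 3)   # (18, 3): x, y, confidence
print(kp.shape)
```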
videoavatars/prepare_data/create_camera.py ADDED
@@ -0,0 +1,56 @@
+ #!/usr/bin/env python2
+ # -*- coding: utf-8 -*-
+
+ import numpy as np
+ import argparse
+ import cPickle as pkl
+
+ """
+
+ This script creates a .pkl file using the given camera intrinsics.
+
+ Example:
+ $ python create_camera.py camera.pkl 1080 1080 -f 900.0 900.0
+
+ """
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('out', type=str, help="Output file (.pkl)")
+ parser.add_argument('width', type=int, help="Frame width in px")
+ parser.add_argument('height', type=int, help="Frame height in px")
+ parser.add_argument('-f', type=float, nargs='*', help="Focal length in px (2,)")
+ parser.add_argument('-c', type=float, nargs='*', help="Principal point in px (2,)")
+ parser.add_argument('-k', type=float, nargs='*', help="Distortion coefficients (5,)")
+
+ args = parser.parse_args()
+
+ camera_data = {
+     'camera_t': np.zeros(3),
+     'camera_rt': np.zeros(3),
+     'camera_f': np.array([args.width, args.width]),
+     'camera_c': np.array([args.width, args.height]) / 2.,
+     'camera_k': np.zeros(5),
+     'width': args.width,
+     'height': args.height,
+ }
+
+ if args.f is not None:
+     if len(args.f) != 2:
+         raise Exception('Focal length should be of shape (2,)')
+
+     camera_data['camera_f'] = np.array(args.f)
+
+ if args.c is not None:
+     if len(args.c) != 2:
+         raise Exception('Principal point should be of shape (2,)')
+
+     camera_data['camera_c'] = np.array(args.c)
+
+ if args.k is not None:
+     if len(args.k) != 5:
+         raise Exception('Distortion coefficients should be of shape (5,)')
+
+     camera_data['camera_k'] = np.array(args.k)
+
+ with open(args.out, 'wb') as f:
+     pkl.dump(camera_data, f, protocol=2)
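The keys written above are the ones `step1_pose.py` reads back (`camera_f`, `camera_c`, `camera_k`, `width`, `height`). A sketch of how they feed an OpenDR camera, mirroring the construction in step 1 (the resize factor and the dummy vertex are illustrative):

```python
import cPickle as pkl
import numpy as np
from opendr.camera import ProjectPoints

with open('camera.pkl', 'rb') as fp:
    cam = pkl.load(fp)

resize = 0.5
camera = ProjectPoints(t=np.zeros(3), rt=np.zeros(3),
                       f=cam['camera_f'] * resize, c=cam['camera_c'] * resize,
                       k=cam['camera_k'], v=np.array([[0., 0., 3.]]))
frustum = {'near': 0.1, 'far': 1000.,
           'width': int(cam['width'] * resize), 'height': int(cam['height'] * resize)}
print(camera.r, frustum)   # a point on the optical axis projects to the principal point
```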
videoavatars/prepare_data/masks2hdf5.py ADDED
@@ -0,0 +1,42 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import os
5
+ import argparse
6
+ import h5py
7
+ import cv2
8
+ import numpy as np
9
+
10
+ from glob import glob
11
+ from tqdm import tqdm
12
+
13
+ """
14
+
15
+ This script stores image masks from a directory in a compressed hdf5 file.
16
+
17
+ Example:
18
+ $ python masks2hdf5.py dataset/subject/masks masks.hdf5
19
+
20
+ """
21
+
22
+ parser = argparse.ArgumentParser()
23
+ parser.add_argument('src', type=str)
24
+ parser.add_argument('target', type=str)
25
+
26
+ args = parser.parse_args()
27
+
28
+ out_file = args.target
29
+ mask_dir = args.src
30
+ mask_files = sorted(glob(os.path.join(mask_dir, '*.png')) + glob(os.path.join(mask_dir, '*.jpg')))
31
+
32
+ with h5py.File(out_file, 'w') as f:
33
+ dset = None
34
+
35
+ for i, silh_file in enumerate(tqdm(mask_files)):
36
+ silh = cv2.imread(silh_file, cv2.IMREAD_GRAYSCALE)
37
+
38
+ if dset is None:
39
+ dset = f.create_dataset("masks", (len(mask_files), silh.shape[0], silh.shape[1]), 'b', chunks=True, compression="lzf")
40
+
41
+ _, silh = cv2.threshold(silh, 100, 255, cv2.THRESH_BINARY)
42
+ dset[i] = silh.astype(np.bool)
videoavatars/render/__init__.py ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/env python2
+ # -*- coding: utf-8 -*-
+
videoavatars/render/camera.py ADDED
@@ -0,0 +1,38 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import cv2
5
+ import numpy as np
6
+ import chumpy as ch
7
+ from chumpy import Ch, depends_on
8
+ from chumpy.utils import col
9
+ from opendr.geometry import Rodrigues
10
+
11
+
12
+ class OrthoProjectPoints(Ch):
13
+ terms = 'near', 'far', 'width', 'height'
14
+ dterms = 'v', 'rt', 't', 'left', 'right', 'bottom', 'top'
15
+
16
+ def compute_r(self):
17
+ return self.r_and_derivatives.r
18
+
19
+ def compute_dr_wrt(self, wrt):
20
+ if wrt not in [self.v, self.rt, self.t, self.left, self.right, self.bottom, self.top]:
21
+ return None
22
+
23
+ return self.r_and_derivatives.dr_wrt(wrt)
24
+
25
+ @depends_on('t', 'rt')
26
+ def view_mtx(self):
27
+ R = cv2.Rodrigues(self.rt.r)[0]
28
+ return np.hstack((R, col(self.t.r)))
29
+
30
+ @property
31
+ def r_and_derivatives(self):
32
+ tmp = self.v.dot(Rodrigues(self.rt)) + self.t
33
+
34
+ return ch.hstack((
35
+ col(2. / (self.right - self.left) * tmp[:, 0] - (self.right + self.left) / (self.right - self.left) + 1.) * self.width / 2.,
36
+ col(2. / (self.bottom - self.top) * tmp[:, 1] - (self.bottom + self.top) / (self.bottom - self.top) + 1.) * self.height / 2.,
37
+ ))
38
+
videoavatars/render/renderer.py ADDED
@@ -0,0 +1,188 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import platform
5
+ import numpy as np
6
+
7
+ from chumpy import Ch, depends_on
8
+ from opendr.renderer import BaseRenderer, ColoredRenderer, TexturedRenderer
9
+ from opendr.renderer import draw_edge_visibility, draw_boundary_images, draw_boundaryid_image
10
+
11
+ if platform.system() == 'Darwin':
12
+ from opendr.contexts.ctx_mac import OsContext
13
+ else:
14
+ from opendr.contexts.ctx_mesa import OsContext
15
+ from opendr.contexts._constants import *
16
+
17
+
18
+ class OrthoBaseRenderer(BaseRenderer):
19
+ terms = ['f', 'overdraw']
20
+ dterms = ['ortho', 'v']
21
+
22
+ @property
23
+ def v(self):
24
+ return self.ortho.v
25
+
26
+ @v.setter
27
+ def v(self, newval):
28
+ self.ortho.v = newval
29
+
30
+ @depends_on('f', 'ortho', 'overdraw')
31
+ def barycentric_image(self):
32
+ return super(OrthoBaseRenderer, self).barycentric_image
33
+
34
+ @depends_on(terms+dterms)
35
+ def boundaryid_image(self):
36
+ self._call_on_changed()
37
+ return draw_boundaryid_image(self.glb, self.v.r, self.f, self.vpe, self.fpe, self.ortho)
38
+
39
+ @depends_on('f', 'ortho', 'overdraw')
40
+ def visibility_image(self):
41
+ return super(OrthoBaseRenderer, self).visibility_image
42
+
43
+ @depends_on('f', 'ortho')
44
+ def edge_visibility_image(self):
45
+ self._call_on_changed()
46
+ return draw_edge_visibility(self.glb, self.v.r, self.vpe, self.f)
47
+
48
+
49
+ class OrthoColoredRenderer(OrthoBaseRenderer, ColoredRenderer):
50
+ terms = 'f', 'background_image', 'overdraw', 'num_channels'
51
+ dterms = 'vc', 'ortho', 'bgcolor'
52
+
53
+ def compute_r(self):
54
+ return self.color_image
55
+
56
+ def compute_dr_wrt(self, wrt):
57
+ raise NotImplementedError
58
+
59
+ def on_changed(self, which):
60
+ if 'ortho' in which:
61
+ w = self.ortho.width
62
+ h = self.ortho.height
63
+ self.glf = OsContext(np.int(w), np.int(h), typ=GL_FLOAT)
64
+ _setup_ortho(self.glf, self.ortho.left.r, self.ortho.right.r, self.ortho.bottom.r, self.ortho.top.r,
65
+ self.ortho.near, self.ortho.far, self.ortho.view_mtx)
66
+ self.glf.Viewport(0, 0, w, h)
67
+ self.glb = OsContext(np.int(w), np.int(h), typ=GL_UNSIGNED_BYTE)
68
+ self.glb.Viewport(0, 0, w, h)
69
+ _setup_ortho(self.glb, self.ortho.left.r, self.ortho.right.r, self.ortho.bottom.r, self.ortho.top.r,
70
+ self.ortho.near, self.ortho.far, self.ortho.view_mtx)
71
+
72
+ if not hasattr(self, 'num_channels'):
73
+ self.num_channels = 3
74
+
75
+ if not hasattr(self, 'bgcolor'):
76
+ self.bgcolor = Ch(np.array([.5] * self.num_channels))
77
+ which.add('bgcolor')
78
+
79
+ if not hasattr(self, 'overdraw'):
80
+ self.overdraw = True
81
+
82
+ if 'bgcolor' in which:
83
+ self.glf.ClearColor(self.bgcolor.r[0], self.bgcolor.r[1 % self.num_channels],
84
+ self.bgcolor.r[2 % self.num_channels], 1.)
85
+
86
+ @depends_on('f', 'ortho', 'vc')
87
+ def boundarycolor_image(self):
88
+ return self.draw_boundarycolor_image(with_vertex_colors=True)
89
+
90
+ @depends_on('f', 'ortho')
91
+ def boundary_images(self):
92
+ self._call_on_changed()
93
+ return draw_boundary_images(self.glb, self.v.r, self.f, self.vpe, self.fpe, self.ortho)
94
+
95
+ @depends_on(terms+dterms)
96
+ def color_image(self):
97
+ return super(OrthoColoredRenderer, self).color_image
98
+
99
+ @property
100
+ def shape(self):
101
+ return (self.ortho.height, self.ortho.width, 3)
102
+
103
+
104
+ class OrthoTexturedRenderer(OrthoColoredRenderer, TexturedRenderer):
105
+ terms = 'f', 'ft', 'background_image', 'overdraw', 'tex_filter_mag', 'tex_filter_min'
106
+ dterms = 'vc', 'ortho', 'bgcolor', 'texture_image', 'vt'
107
+
108
+ def compute_dr_wrt(self, wrt):
109
+ raise NotImplementedError
110
+
111
+ def on_changed(self, which):
112
+ OrthoColoredRenderer.on_changed(self, which)
113
+
114
+ # have to redo if ortho changes, b/c ortho triggers new context
115
+ if 'texture_image' in which or 'ortho' in which:
116
+ gl = self.glf
117
+ texture_data = np.array(self.texture_image * 255., dtype='uint8', order='C')
118
+ tmp = np.zeros(1, dtype=np.uint32)
119
+
120
+ self.release_textures()
121
+ gl.GenTextures(1, tmp)
122
+
123
+ self.textureID = tmp[0]
124
+ gl.BindTexture(GL_TEXTURE_2D, self.textureID)
125
+
126
+ gl.TexImage2Dub(GL_TEXTURE_2D, 0, GL_RGB, texture_data.shape[1], texture_data.shape[0], 0, GL_RGB,
127
+ texture_data.ravel())
128
+ gl.GenerateMipmap(GL_TEXTURE_2D)
129
+
130
+ def release_textures(self):
131
+ if hasattr(self, 'textureID'):
132
+ arr = np.asarray(np.array([self.textureID]), np.uint32)
133
+ self.glf.DeleteTextures(arr)
134
+
135
+ def texture_mapping_on(self, gl, with_vertex_colors):
136
+ gl.Enable(GL_TEXTURE_2D)
137
+ gl.BindTexture(GL_TEXTURE_2D, self.textureID)
138
+ gl.TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
139
+ gl.TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)
140
+ gl.TexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE if with_vertex_colors else GL_REPLACE)
141
+ gl.EnableClientState(GL_TEXTURE_COORD_ARRAY)
142
+
143
+ @depends_on(dterms+terms)
144
+ def boundaryid_image(self):
145
+ return super(OrthoTexturedRenderer, self).boundaryid_image
146
+
147
+ @depends_on(terms+dterms)
148
+ def color_image(self):
149
+ self.glf.BindTexture(GL_TEXTURE_2D, self.textureID)
150
+ return super(OrthoTexturedRenderer, self).color_image
151
+
152
+ @depends_on(terms+dterms)
153
+ def boundarycolor_image(self):
154
+ self.glf.BindTexture(GL_TEXTURE_2D, self.textureID)
155
+ return super(OrthoTexturedRenderer, self).boundarycolor_image
156
+
157
+ @property
158
+ def shape(self):
159
+ return (self.ortho.height, self.ortho.width, 3)
160
+
161
+ @depends_on('vt', 'ft')
162
+ def mesh_tex_coords(self):
163
+ ftidxs = self.ft.ravel()
164
+ data = np.asarray(self.vt.r[ftidxs].astype(np.float32)[:, 0:2], np.float32, order='C')
165
+ data[:, 1] = 1.0 - 1.0 * data[:, 1]
166
+ return data
167
+
168
+
169
+ def _setup_ortho(gl, l, r, b, t, near, far, view_matrix):
170
+ gl.MatrixMode(GL_PROJECTION)
171
+ gl.LoadIdentity()
172
+ gl.Ortho(l, r, t, b, near, far) # top and bottom switched for opencv coordinate system
173
+
174
+ gl.MatrixMode(GL_MODELVIEW)
175
+ gl.LoadIdentity()
176
+ gl.Rotatef(180, 1, 0, 0)
177
+
178
+ view_mtx = np.asarray(np.vstack((view_matrix, np.array([0, 0, 0, 1]))), np.float32, order='F')
179
+ gl.MultMatrixf(view_mtx)
180
+
181
+ gl.Enable(GL_DEPTH_TEST)
182
+ gl.PolygonMode(GL_BACK, GL_FILL)
183
+ gl.Disable(GL_LIGHTING)
184
+ gl.Disable(GL_CULL_FACE)
185
+ gl.PixelStorei(GL_PACK_ALIGNMENT, 1)
186
+ gl.PixelStorei(GL_UNPACK_ALIGNMENT, 1)
187
+
188
+ gl.UseProgram(0)
videoavatars/requirements.txt ADDED
@@ -0,0 +1,8 @@
+ numpy>=1.11.0
+ opendr>=0.76
+
+ scipy>=0.19.0
+ matplotlib==2.*
+ h5py==2.7.*
+ tqdm==4.*
+ chumpy>=0.67
videoavatars/run_step1.sh ADDED
@@ -0,0 +1,27 @@
+ #!/bin/bash
+ #ca videoavatar
+
+ if [ "$#" -le 1 ]; then
+     echo "usage: run_step1.sh <path_to_subject_directory> <output_directory> [options]" >&2
+     exit 1
+ fi
+
+ SUBJ="$1"
+
+ OUT="$2"
+ gender="$3"
+
+ if [[ $gender = "female" ]]; then
+     MODEL='--model vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
+     echo "use female smpl model"
+ elif [[ $gender = "male" ]]; then
+     MODEL='--model vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'
+     echo "use male smpl model"
+ else
+     echo "Invalid gender: $gender"
+     exit 1
+ fi
+
+ echo $MODEL
+ python step1_pose.py \
+     $SUBJ/keypoints.hdf5 $SUBJ/masks.hdf5 $SUBJ/camera.pkl $OUT/reconstructed_poses.hdf5 $MODEL #${@:3}
videoavatars/run_step2.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+
+ if [ "$#" -le 1 ]; then
+     echo "usage: run_step2.sh <path_to_subject_directory> <output_directory> [options]" >&2
+     exit 1
+ fi
+
+ SUBJ="$1"
+ OUT="$2"
+
+ #if [[ $SUBJ = *"female"* ]]; then
+ MODEL='--model vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
+ #fi
+
+ python step2_consensus.py $SUBJ/reconstructed_poses.hdf5 $SUBJ/masks.hdf5 $SUBJ/camera.pkl $OUT/consensus.pkl --obj_out $OUT/consensus.obj $MODEL ${@:3}
videoavatars/run_step3.sh ADDED
@@ -0,0 +1,15 @@
+ #!/bin/bash
+
+ if [ "$#" -le 1 ]; then
+     echo "usage: run_step3.sh <path_to_subject_directory> <output_directory> [options]" >&2
+     exit 1
+ fi
+
+ SUBJ="$1"
+ OUT="$2"
+
+ if [[ $SUBJ = *"female"* ]]; then
+     MODEL='--model vendor/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
+ fi
+
+ python step3_texture.py $SUBJ/consensus.pkl $SUBJ/camera.pkl $SUBJ/$(basename $SUBJ).mp4 $SUBJ/reconstructed_poses.hdf5 $SUBJ/masks.hdf5 $OUT/tex-$(basename $SUBJ).jpg $MODEL ${@:3}
videoavatars/step1_pose.py ADDED
@@ -0,0 +1,377 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import cv2, pdb
5
+ import h5py
6
+ import argparse
7
+ import numpy as np
8
+ import chumpy as ch
9
+ import cPickle as pkl
10
+
11
+ from opendr.camera import ProjectPoints
12
+ from opendr.lighting import LambertianPointLight
13
+ from opendr.renderer import ColoredRenderer
14
+ from opendr.filters import gaussian_pyramid
15
+
16
+ from util import im
17
+ from util.logger import log
18
+ from lib.frame import FrameData
19
+ from models.smpl import Smpl, copy_smpl, joints_coco
20
+ from models.bodyparts import faces_no_hands
21
+
22
+ from vendor.smplify.sphere_collisions import SphereCollisions
23
+ from vendor.smplify.robustifiers import GMOf
24
+
25
+
26
+ def get_cb(viz_rn, f):
27
+ if viz_rn is not None:
28
+ viz_rn.set(v=f.smpl, background_image=np.dstack((f.mask, f.mask, f.mask)))
29
+ viz_rn.vc.set(v=f.smpl)
30
+
31
+ def cb(_):
32
+ debug = np.array(viz_rn.r)
33
+
34
+ for j in f.J_proj.r:
35
+ cv2.circle(debug, tuple(j.astype(np.int)), 3, (0, 0, 0.8), -1)
36
+ for j in f.keypoints[:, :2]:
37
+ cv2.circle(debug, tuple(j.astype(np.int)), 3, (0, 0.8, 0), -1)
38
+
39
+ im.show(debug, id='pose', waittime=1)
40
+ else:
41
+ cb = None
42
+
43
+ return cb
44
+
45
+
46
+ def collision_obj(smpl, regs):
47
+ sp = SphereCollisions(pose=smpl.pose, betas=smpl.betas, model=smpl, regs=regs)
48
+ sp.no_hands = True
49
+
50
+ return sp
51
+
52
+
53
+ def pose_prior_obj(smpl, prior_data):
54
+ return (smpl.pose[3:] - prior_data['mean']).reshape(1, -1).dot(prior_data['prec'])
55
+
56
+
57
+ def height_predictor(b2m, betas):
58
+ return ch.hstack((betas.reshape(1, -1), [[1]])).dot(b2m)
59
+
60
+
61
+ def init(frames, body_height, b2m, viz_rn):
62
+ betas = frames[0].smpl.betas
63
+
64
+ E_height = None
65
+ if body_height is not None:
66
+ E_height = height_predictor(b2m, betas) - body_height * 1000.
67
+
68
+ # first get a rough pose for all frames individually
69
+ for i, f in enumerate(frames):
70
+ if np.sum(f.keypoints[[0, 2, 5, 8, 11], 2]) > 3.:
71
+ if f.keypoints[2, 0] > f.keypoints[5, 0]:
72
+ f.smpl.pose[0] = 0
73
+ f.smpl.pose[2] = np.pi
74
+ # pdb.set_trace()
75
+ E_init = {
76
+ 'init_pose_{}'.format(i): f.pose_obj[[0, 2, 5, 8, 11]]
77
+ }
78
+
79
+ x0 = [f.smpl.trans, f.smpl.pose[:3]]
80
+
81
+ if E_height is not None and i == 0:
82
+ E_init['height'] = E_height
83
+ E_init['betas'] = betas
84
+ x0.append(betas)
85
+
86
+ ch.minimize(
87
+ E_init,
88
+ x0,
89
+ method='dogleg',
90
+ options={
91
+ 'e_3': .01,
92
+ },
93
+ callback=get_cb(viz_rn, f)
94
+ )
95
+
96
+ weights = zip(
97
+ [5., 4.5, 4.],
98
+ [5., 4., 3.]
99
+ )
100
+
101
+ E_betas = betas - betas.r
102
+
103
+ for w_prior, w_betas in weights:
104
+ x0 = [betas]
105
+
106
+ E = {
107
+ 'betas': E_betas * w_betas,
108
+ }
109
+
110
+ if E_height is not None:
111
+ E['height'] = E_height
112
+
113
+ for i, f in enumerate(frames):
114
+ if np.sum(f.keypoints[[0, 2, 5, 8, 11], 2]) > 3.:
115
+ x0.extend([f.smpl.pose[range(21) + range(27, 30) + range(36, 60)], f.smpl.trans])
116
+ E['pose_{}'.format(i)] = f.pose_obj
117
+ E['prior_{}'.format(i)] = f.pose_prior_obj * w_prior
118
+
119
+ ch.minimize(
120
+ E,
121
+ x0,
122
+ method='dogleg',
123
+ options={
124
+ 'e_3': .01,
125
+ },
126
+ callback=get_cb(viz_rn, frames[0])
127
+ )
128
+
129
+
130
+ def reinit_frame(frame, null_pose, nohands, viz_rn):
131
+
132
+ if (np.sum(frame.pose_obj.r ** 2) > 625 or np.sum(frame.pose_prior_obj.r ** 2) > 75)\
133
+ and np.sum(frame.keypoints[[0, 2, 5, 8, 11], 2]) > 3.:
134
+
135
+ log.info('Tracking error too large. Re-init frame...')
136
+
137
+ x0 = [frame.smpl.pose[:3], frame.smpl.trans]
138
+
139
+ frame.smpl.pose[3:] = null_pose
140
+ if frame.keypoints[2, 0] > frame.keypoints[5, 0]:
141
+ frame.smpl.pose[0] = 0
142
+ frame.smpl.pose[2] = np.pi
143
+
144
+ E = {
145
+ 'init_pose': frame.pose_obj[[0, 2, 5, 8, 11]],
146
+ }
147
+
148
+ ch.minimize(
149
+ E,
150
+ x0,
151
+ method='dogleg',
152
+ options={
153
+ 'e_3': .1,
154
+ },
155
+ callback=get_cb(viz_rn, frame)
156
+ )
157
+
158
+ E = {
159
+ 'pose': GMOf(frame.pose_obj, 100),
160
+ 'prior': frame.pose_prior_obj * 8.,
161
+ }
162
+
163
+ x0 = [frame.smpl.trans]
164
+
165
+ if nohands:
166
+ x0.append(frame.smpl.pose[range(21) + range(27, 30) + range(36, 60)])
167
+ else:
168
+ x0.append(frame.smpl.pose[range(21) + range(27, 30) + range(36, 72)])
169
+
170
+ ch.minimize(
171
+ E,
172
+ x0,
173
+ method='dogleg',
174
+ options={
175
+ 'e_3': .01,
176
+ },
177
+ callback=get_cb(viz_rn, frame)
178
+ )
179
+
180
+
181
+ def fit_pose(frame, last_smpl, frustum, nohands, viz_rn):
182
+
183
+ if nohands:
184
+ faces = faces_no_hands(frame.smpl.f)
185
+ else:
186
+ faces = frame.smpl.f
187
+
188
+ dst_type = cv2.cv.CV_DIST_L2 if cv2.__version__[0] == '2' else cv2.DIST_L2
189
+
190
+ dist_i = cv2.distanceTransform(np.uint8(frame.mask * 255), dst_type, 5) - 1
191
+ dist_i[dist_i < 0] = 0
192
+ dist_i[dist_i > 50] = 50
193
+ dist_o = cv2.distanceTransform(255 - np.uint8(frame.mask * 255), dst_type, 5)
194
+ dist_o[dist_o > 50] = 50
195
+
196
+ rn_m = ColoredRenderer(camera=frame.camera, v=frame.smpl, f=faces, vc=np.ones_like(frame.smpl), frustum=frustum,
197
+ bgcolor=0, num_channels=1)
198
+
199
+ E = {
200
+ 'mask': gaussian_pyramid(rn_m * dist_o * 100. + (1 - rn_m) * dist_i, n_levels=4, normalization='size') * 80.,
201
+ '2dpose': GMOf(frame.pose_obj, 100),
202
+ 'prior': frame.pose_prior_obj * 4.,
203
+ 'sp': frame.collision_obj * 1e3,
204
+ }
205
+
206
+ if last_smpl is not None:
207
+ E['last_pose'] = GMOf(frame.smpl.pose - last_smpl.pose, 0.05) * 50.
208
+ E['last_trans'] = GMOf(frame.smpl.trans - last_smpl.trans, 0.05) * 50.
209
+
210
+ if nohands:
211
+ x0 = [frame.smpl.pose[range(21) + range(27, 30) + range(36, 60)], frame.smpl.trans]
212
+ else:
213
+ x0 = [frame.smpl.pose[range(21) + range(27, 30) + range(36, 72)], frame.smpl.trans]
214
+
215
+ ch.minimize(
216
+ E,
217
+ x0,
218
+ method='dogleg',
219
+ options={
220
+ 'e_3': .01,
221
+ },
222
+ callback=get_cb(viz_rn, frame)
223
+ )
224
+
225
+
226
+ def main(keypoint_file, masks_file, camera_file, out, model_file, prior_file, resize,
227
+ body_height, nohands, display):
228
+
229
+ # load data
230
+ with open(model_file, 'rb') as fp:
231
+ model_data = pkl.load(fp)
232
+
233
+ with open(camera_file, 'rb') as fp:
234
+ camera_data = pkl.load(fp)
235
+
236
+ with open(prior_file, 'rb') as fp:
237
+ prior_data = pkl.load(fp)
238
+
239
+ if 'basicModel_f' in model_file:
240
+ regs = np.load('vendor/smplify/models/regressors_locked_normalized_female.npz')
241
+ b2m = np.load('assets/b2m_f.npy')
242
+ else:
243
+ regs = np.load('vendor/smplify/models/regressors_locked_normalized_male.npz')
244
+ b2m = np.load('assets/b2m_m.npy')
245
+
246
+ keypoints = h5py.File(keypoint_file, 'r')['keypoints']
247
+ masks = h5py.File(masks_file, 'r')['masks']
248
+ num_frames = masks.shape[0]
249
+
250
+ # init
251
+ base_smpl = Smpl(model_data)
252
+ base_smpl.trans[:] = np.array([0, 0, 3])
253
+ base_smpl.pose[0] = np.pi
254
+ base_smpl.pose[3:] = prior_data['mean']
255
+
256
+ camera = ProjectPoints(t=np.zeros(3), rt=np.zeros(3), c=camera_data['camera_c'] * resize,
257
+ f=camera_data['camera_f'] * resize, k=camera_data['camera_k'], v=base_smpl)
258
+ frustum = {'near': 0.1, 'far': 1000.,
259
+ 'width': int(camera_data['width'] * resize), 'height': int(camera_data['height'] * resize)}
260
+
261
+ if display:
262
+ debug_cam = ProjectPoints(v=base_smpl, t=camera.t, rt=camera.rt, c=camera.c, f=camera.f, k=camera.k)
263
+ debug_light = LambertianPointLight(f=base_smpl.f, v=base_smpl, num_verts=len(base_smpl), light_pos=np.zeros(3),
264
+ vc=np.ones(3), light_color=np.ones(3))
265
+ debug_rn = ColoredRenderer(camera=debug_cam, v=base_smpl, f=base_smpl.f, vc=debug_light, frustum=frustum)
266
+ else:
267
+ debug_rn = None
268
+
269
+ # generic frame loading function
270
+ def create_frame(i, smpl, copy=True):
271
+ f = FrameData()
272
+
273
+ f.smpl = copy_smpl(smpl, model_data) if copy else smpl
274
+ f.camera = ProjectPoints(v=f.smpl, t=camera.t, rt=camera.rt, c=camera.c, f=camera.f, k=camera.k)
275
+
276
+ f.keypoints = np.array(keypoints[i]).reshape(-1, 3) * np.array([resize, resize, 1])
277
+ f.J = joints_coco(f.smpl)
278
+ f.J_proj = ProjectPoints(v=f.J, t=camera.t, rt=camera.rt, c=camera.c, f=camera.f, k=camera.k)
279
+ f.mask = cv2.resize(np.array(masks[i], dtype=np.float32), (0, 0),
280
+ fx=resize, fy=resize, interpolation=cv2.INTER_NEAREST)
281
+
282
+ f.collision_obj = collision_obj(f.smpl, regs)
283
+ f.pose_prior_obj = pose_prior_obj(f.smpl, prior_data)
284
+ f.pose_obj = (f.J_proj - f.keypoints[:, :2]) * f.keypoints[:, 2].reshape(-1, 1)
285
+
286
+ return f
287
+
288
+ base_frame = create_frame(0, base_smpl, copy=False)
289
+
290
+ # get betas from 5 frames
291
+ log.info('Initial fit')
292
+
293
+ num_init = 5
294
+ indices_init = np.ceil(np.arange(num_init) * num_frames * 1. / num_init).astype(np.int)
295
+
296
+ init_frames = [base_frame]
297
+ for i in indices_init[1:]:
298
+ init_frames.append(create_frame(i, base_smpl))
299
+
300
+ init(init_frames, body_height, b2m, debug_rn)
301
+
302
+ # get pose frame by frame
303
+ with h5py.File(out, 'w') as fp:
304
+ last_smpl = None
305
+ poses_dset = fp.create_dataset("pose", (num_frames, 72), 'f', chunks=True, compression="lzf")
306
+ trans_dset = fp.create_dataset("trans", (num_frames, 3), 'f', chunks=True, compression="lzf")
307
+ betas_dset = fp.create_dataset("betas", (10,), 'f', chunks=True, compression="lzf")
308
+
309
+ for i in xrange(num_frames):
310
+ if i == 0:
311
+ current_frame = base_frame
312
+ else:
313
+ current_frame = create_frame(i, last_smpl)
314
+
315
+ log.info('Fit frame {}'.format(i))
316
+ # re-init if necessary
317
+ reinit_frame(current_frame, prior_data['mean'], nohands, debug_rn)
318
+ # final fit
319
+ fit_pose(current_frame, last_smpl, frustum, nohands, debug_rn)
320
+
321
+ poses_dset[i] = current_frame.smpl.pose.r
322
+ trans_dset[i] = current_frame.smpl.trans.r
323
+
324
+ if i == 0:
325
+ betas_dset[:] = current_frame.smpl.betas.r
326
+
327
+ last_smpl = current_frame.smpl
328
+
329
+ log.info('Done.')
330
+
331
+
332
+ if __name__ == '__main__':
333
+ parser = argparse.ArgumentParser()
334
+
335
+ parser.add_argument(
336
+ 'keypoint_file',
337
+ type=str,
338
+ help="File that contains 2D keypoint detections")
339
+ parser.add_argument(
340
+ 'masks_file',
341
+ type=str,
342
+ help="File that contains segmentations")
343
+ parser.add_argument(
344
+ 'camera',
345
+ type=str,
346
+ help="pkl file that contains camera settings")
347
+ parser.add_argument(
348
+ 'out',
349
+ type=str,
350
+ help="Out file path")
351
+ parser.add_argument(
352
+ '--model', '-m',
353
+ default='vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.1.0.pkl',
354
+ help='Path to SMPL model')
355
+ parser.add_argument(
356
+ '--prior', '-p',
357
+ default='assets/prior_a_pose.pkl',
358
+ help='Path to pose prior')
359
+ parser.add_argument(
360
+ '--resize', '-r', default=0.5, type=float,
361
+ help="Resize factor")
362
+ parser.add_argument(
363
+ '--body_height', '-bh', default=None, type=float,
364
+ help="Height of the subject in meters (optional)")
365
+ parser.add_argument(
366
+ '--nohands', '-nh',
367
+ action='store_true',
368
+ help="Exclude hands from optimization")
369
+ parser.add_argument(
370
+ '--display', '-d',
371
+ action='store_true',
372
+ help="Enable visualization")
373
+
374
+ args = parser.parse_args()
375
+
376
+ main(args.keypoint_file, args.masks_file, args.camera, args.out, args.model, args.prior, args.resize,
377
+ args.body_height, args.nohands, args.display)
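The three datasets written above fully describe step 1's output: "pose" (num_frames x 72 axis-angle parameters), "trans" (num_frames x 3 translations) and "betas" (10 shape coefficients shared across frames). A minimal sketch, not part of the repository, of reading them back with h5py; the file name is a placeholder for whatever path was passed as `out`:

import h5py

# placeholder path for the `out` argument of step1_pose.py
with h5py.File('reconstructed_poses.hdf5', 'r') as fp:
    poses = fp['pose'][()]    # (num_frames, 72) per-frame SMPL pose
    trans = fp['trans'][()]   # (num_frames, 3) per-frame global translation
    betas = fp['betas'][()]   # (10,) shape coefficients, written once from frame 0

print(poses.shape, trans.shape, betas.shape)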
videoavatars/step2_consensus.py ADDED
@@ -0,0 +1,204 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import h5py
5
+ import argparse
6
+ import numpy as np
7
+ import chumpy as ch
8
+ import cPickle as pkl
9
+
10
+ from opendr.camera import ProjectPoints
11
+ from opendr.renderer import BoundaryRenderer, ColoredRenderer
12
+ from tqdm import tqdm
13
+
14
+ from util import im, mesh
15
+ from util.logger import log
16
+ from lib.frame import setup_frame_rays
17
+ from lib.rays import ray_objective
18
+ from lib.geometry import laplacian
19
+ from lib.ch import sp_dot
20
+ from models.smpl import Smpl
21
+ from models.bodyparts import faces_no_hands, regularize_laplace, regularize_model, regularize_symmetry
22
+
23
+
24
+ def get_cb(frame, base_smpl, camera, frustum):
25
+ viz_mask = frame.mask / 255.
26
+ base_smpl.pose[:] = frame.pose
27
+ camera.t[:] = frame.trans
28
+ camera.rt[:] = 0
29
+
30
+ rn = ColoredRenderer(camera=camera, v=base_smpl, f=base_smpl.f, vc=np.ones_like(base_smpl),
31
+ frustum=frustum, bgcolor=0, num_channels=1)
32
+
33
+ def cb(_):
34
+ silh_diff = (rn.r - viz_mask + 1) / 2.
35
+ im.show(silh_diff, waittime=1)
36
+
37
+ return cb
38
+
39
+
40
+ def fit_consensus(frames, base_smpl, camera, frustum, model_data, nohands, icp_count, naked, display):
41
+ if nohands:
42
+ faces = faces_no_hands(base_smpl.f)
43
+ else:
44
+ faces = base_smpl.f
45
+
46
+ vis_rn_b = BoundaryRenderer(camera=camera, frustum=frustum, f=faces, num_channels=1)
47
+ vis_rn_m = ColoredRenderer(camera=camera, frustum=frustum, f=faces, vc=np.zeros_like(base_smpl), bgcolor=1,
48
+ num_channels=1)
49
+
50
+ model_template = Smpl(model_data)
51
+ model_template.betas[:] = base_smpl.betas.r
52
+
53
+ g_laplace = regularize_laplace()
54
+ g_model = regularize_model()
55
+ g_symmetry = regularize_symmetry()
56
+
57
+ for step, (w_laplace, w_model, w_symmetry, sigma) in enumerate(zip(
58
+ np.linspace(6.5, 4.0, icp_count) if naked else np.linspace(4.0, 2.0, icp_count),
59
+ np.linspace(0.9, 0.6, icp_count) if naked else np.linspace(0.6, 0.3, icp_count),
60
+ np.linspace(3.6, 1.8, icp_count),
61
+ np.linspace(0.06, 0.003, icp_count),
62
+ )):
63
+ log.info('# Step {}'.format(step))
64
+
65
+ L = laplacian(model_template.r, base_smpl.f)
66
+ delta = L.dot(model_template.r)
67
+
68
+ w_laplace *= g_laplace.reshape(-1, 1)
69
+ w_model *= g_model.reshape(-1, 1)
70
+ w_symmetry *= g_symmetry.reshape(-1, 1)
71
+
72
+ E = {
73
+ 'laplace': (sp_dot(L, base_smpl.v_shaped_personal) - delta) * w_laplace,
74
+ 'model': (base_smpl.v_shaped_personal - model_template) * w_model,
75
+ 'symmetry': (base_smpl.v_personal + np.array([1, -1, -1])
76
+ * base_smpl.v_personal[model_data['vert_sym_idxs']]) * w_symmetry,
77
+ }
78
+
79
+ log.info('## Matching rays with contours')
80
+ for current, f in enumerate(tqdm(frames)):
81
+ E['silh_{}'.format(current)] = ray_objective(f, sigma, base_smpl, camera, vis_rn_b, vis_rn_m)
82
+
83
+ log.info('## Run optimization')
84
+ ch.minimize(
85
+ E,
86
+ [base_smpl.v_personal, model_template.betas],
87
+ method='dogleg',
88
+ options={'maxiter': 15, 'e_3': 0.001},
89
+ callback=get_cb(frames[0], base_smpl, camera, frustum) if display else None
90
+ )
91
+
92
+
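The energy assembled above combines a Laplacian term (preserve the surface detail delta of the shaped template), a model term (stay close to the SMPL template), a symmetry term on v_personal, and one silhouette ray term per frame, with weights annealed over the ICP steps. A rough, self-contained numpy illustration of the kind of Laplacian the 'laplace' residual relies on, assuming uniform edge weights (the actual laplacian() in lib/geometry.py may weight edges differently):

import numpy as np
import scipy.sparse as sp

def uniform_laplacian(n_verts, faces):
    # adjacency from triangle edges, then L = D - A
    rows = np.hstack([faces[:, i] for i in (0, 1, 2, 1, 2, 0)])
    cols = np.hstack([faces[:, i] for i in (1, 2, 0, 0, 1, 2)])
    adj = sp.coo_matrix((np.ones(rows.size), (rows, cols)), shape=(n_verts, n_verts)).tocsr()
    adj.data[:] = 1.                      # de-duplicate repeated edges
    deg = np.asarray(adj.sum(axis=1)).ravel()
    return sp.diags(deg) - adj

# delta = L.dot(template) encodes the template's local surface detail;
# the 'laplace' term penalizes L * v_shaped_personal - delta, i.e. changes to that detail.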
93
+ def main(pose_file, masks_file, camera_file, out, obj_out, num, icp_count, model_file, first_frame, last_frame,
94
+ nohands, naked, display):
95
+
96
+ # load data
97
+ with open(model_file, 'rb') as fp:
98
+ model_data = pkl.load(fp)
99
+
100
+ with open(camera_file, 'rb') as fp:
101
+ camera_data = pkl.load(fp)
102
+
103
+ pose_data = h5py.File(pose_file, 'r')
104
+ poses = pose_data['pose'][first_frame:last_frame]
105
+ trans = pose_data['trans'][first_frame:last_frame]
106
+ masks = h5py.File(masks_file, 'r')['masks'][first_frame:last_frame]
107
+ num_frames = masks.shape[0]
108
+
109
+ indices_consensus = np.ceil(np.arange(num) * num_frames * 1. / num).astype(np.int)
110
+
111
+ # init
112
+ base_smpl = Smpl(model_data)
113
+ base_smpl.betas[:] = np.array(pose_data['betas'], dtype=np.float32)
114
+
115
+ camera = ProjectPoints(t=np.zeros(3), rt=np.zeros(3), c=camera_data['camera_c'],
116
+ f=camera_data['camera_f'], k=camera_data['camera_k'], v=base_smpl)
117
+ camera_t = camera_data['camera_t']
118
+ camera_rt = camera_data['camera_rt']
119
+ frustum = {'near': 0.1, 'far': 1000., 'width': int(camera_data['width']), 'height': int(camera_data['height'])}
120
+ frames = []
121
+
122
+ for i in indices_consensus:
123
+ log.info('Set up frame {}...'.format(i))
124
+
125
+ mask = np.array(masks[i] * 255, dtype=np.uint8)
126
+ pose_i = np.array(poses[i], dtype=np.float32)
127
+ trans_i = np.array(trans[i], dtype=np.float32)
128
+
129
+ frames.append(setup_frame_rays(base_smpl, camera, camera_t, camera_rt, pose_i, trans_i, mask))
130
+
131
+ log.info('Set up complete.')
132
+ log.info('Begin consensus fit...')
133
+ fit_consensus(frames, base_smpl, camera, frustum, model_data, nohands, icp_count, naked, display)
134
+
135
+ with open(out, 'wb') as fp:
136
+ pkl.dump({
137
+ 'v_personal': base_smpl.v_personal.r,
138
+ 'betas': base_smpl.betas.r,
139
+ }, fp, protocol=2)
140
+
141
+ if obj_out is not None:
142
+ base_smpl.pose[:] = 0
143
+ vt = np.load('assets/basicModel_vt.npy')
144
+ ft = np.load('assets/basicModel_ft.npy')
145
+ mesh.write(obj_out, base_smpl.r, base_smpl.f, vt=vt, ft=ft)
146
+
147
+ log.info('Done.')
148
+
149
+
150
+ if __name__ == '__main__':
151
+ parser = argparse.ArgumentParser()
152
+ parser.add_argument(
153
+ 'pose_file',
154
+ type=str,
155
+ help="File that contains poses")
156
+ parser.add_argument(
157
+ 'masks_file',
158
+ type=str,
159
+ help="File that contains segmentations")
160
+ parser.add_argument(
161
+ 'camera',
162
+ type=str,
163
+ help="pkl file that contains camera settings")
164
+ parser.add_argument(
165
+ 'out',
166
+ type=str,
167
+ help="Out file path")
168
+ parser.add_argument(
169
+ '--obj_out', '-oo',
170
+ default=None,
171
+ help='obj out file name (optional)')
172
+ parser.add_argument(
173
+ '--num', '-n', default=120, type=int,
174
+ help="Number of used frames")
175
+ parser.add_argument(
176
+ '--icp', '-i', default=3, type=int,
177
+ help="ICP Iterations")
178
+ parser.add_argument(
179
+ '--model', '-m',
180
+ default='vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl',
181
+ help='Path to SMPL model')
182
+ parser.add_argument(
183
+ '--first_frame', '-f', default=0, type=int,
184
+ help="First frame to use")
185
+ parser.add_argument(
186
+ '--last_frame', '-l', default=2000, type=int,
187
+ help="Last frame to use")
188
+ parser.add_argument(
189
+ '--nohands', '-nh',
190
+ action='store_true',
191
+ help="Exclude hands from optimization")
192
+ parser.add_argument(
193
+ '--naked', '-nk',
194
+ action='store_true',
195
+ help="Person wears (almost) no clothing")
196
+ parser.add_argument(
197
+ '--display', '-d',
198
+ action='store_true',
199
+ help="Enable visualization")
200
+
201
+ args = parser.parse_args()
202
+
203
+ main(args.pose_file, args.masks_file, args.camera, args.out, args.obj_out, args.num, args.icp, args.model,
204
+ args.first_frame, args.last_frame, args.nohands, args.naked, args.display)
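The consensus pickle written above holds only 'v_personal' and 'betas'. A minimal sketch of re-applying it to a fresh Smpl instance, mirroring what step3_texture.py does below; the consensus path is a placeholder for the `out` argument:

import cPickle as pkl
from models.smpl import Smpl

with open('vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl', 'rb') as fp:
    model_data = pkl.load(fp)
with open('consensus.pkl', 'rb') as fp:   # placeholder output path
    consensus_data = pkl.load(fp)

base_smpl = Smpl(model_data)
base_smpl.betas[:] = consensus_data['betas']
base_smpl.v_personal[:] = consensus_data['v_personal']
vertices = base_smpl.r                     # personalized vertices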
videoavatars/step3_texture.py ADDED
@@ -0,0 +1,200 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import cv2
5
+ import h5py
6
+ import argparse
7
+ import numpy as np
8
+ import cPickle as pkl
9
+
10
+ from opendr.renderer import ColoredRenderer
11
+ from opendr.camera import ProjectPoints
12
+ from opendr.geometry import VertNormals
13
+ from tex.iso import Isomapper, IsoColoredRenderer
14
+
15
+ from util import im
16
+ from util.logger import log
17
+ from models.smpl import Smpl
18
+
19
+
20
+ def main(consensus_file, camera_file, video_file, pose_file, masks_file, out, model_file, resolution, num,
21
+ first_frame, last_frame, display):
22
+ # load data
23
+ with open(model_file, 'rb') as fp:
24
+ model_data = pkl.load(fp)
25
+
26
+ with open(camera_file, 'rb') as fp:
27
+ camera_data = pkl.load(fp)
28
+
29
+ with open(consensus_file, 'rb') as fp:
30
+ consensus_data = pkl.load(fp)
31
+
32
+ pose_data = h5py.File(pose_file, 'r')
33
+ poses = pose_data['pose'][first_frame:last_frame]
34
+ trans = pose_data['trans'][first_frame:last_frame]
35
+ masks = h5py.File(masks_file, 'r')['masks'][first_frame:last_frame]
36
+ num_frames = masks.shape[0]
37
+ indices_texture = np.ceil(np.arange(num) * num_frames * 1. / num).astype(np.int)
38
+
39
+ vt = np.load('assets/basicModel_vt.npy')
40
+ ft = np.load('assets/basicModel_ft.npy')
41
+
42
+ # init
43
+ base_smpl = Smpl(model_data)
44
+ base_smpl.betas[:] = consensus_data['betas']
45
+ base_smpl.v_personal[:] = consensus_data['v_personal']
46
+
47
+ bgcolor = np.array([1., 0.2, 1.])
48
+ iso = Isomapper(vt, ft, base_smpl.f, resolution, bgcolor=bgcolor)
49
+ iso_vis = IsoColoredRenderer(vt, ft, base_smpl.f, resolution)
50
+ camera = ProjectPoints(t=camera_data['camera_t'], rt=camera_data['camera_rt'], c=camera_data['camera_c'],
51
+ f=camera_data['camera_f'], k=camera_data['camera_k'], v=base_smpl)
52
+ frustum = {'near': 0.1, 'far': 1000., 'width': int(camera_data['width']), 'height': int(camera_data['height'])}
53
+ rn_vis = ColoredRenderer(f=base_smpl.f, frustum=frustum, camera=camera, num_channels=1)
54
+
55
+ cap = cv2.VideoCapture(video_file)
56
+ for _ in range(first_frame):
57
+ cap.grab()
58
+
59
+ # get part-textures
60
+ i = first_frame
61
+
62
+ tex_agg = np.zeros((resolution, resolution, 25, 3))
63
+ tex_agg[:] = np.nan
64
+ normal_agg = np.ones((resolution, resolution, 25)) * 0.2
65
+
66
+ vn = VertNormals(f=base_smpl.f, v=base_smpl)
67
+ static_indices = np.indices((resolution, resolution))
68
+
69
+ while cap.isOpened() and i < indices_texture[-1]:
70
+ if i in indices_texture:
71
+ log.info('Getting part texture from frame {}...'.format(i))
72
+ _, frame = cap.read()
73
+
74
+ mask = np.array(masks[i], dtype=np.uint8)
75
+ pose_i = np.array(poses[i], dtype=np.float32)
76
+ trans_i = np.array(trans[i], dtype=np.float32)
77
+
78
+ base_smpl.pose[:] = pose_i
79
+ base_smpl.trans[:] = trans_i
80
+
81
+ # which faces have been seen and are projected into the silhouette?
82
+ visibility = rn_vis.visibility_image.ravel()
83
+ visible = np.nonzero(visibility != 4294967295)[0]
84
+
85
+ proj = camera.r
86
+ in_viewport = np.logical_and(
87
+ np.logical_and(np.round(camera.r[:, 0]) >= 0, np.round(camera.r[:, 0]) < frustum['width']),
88
+ np.logical_and(np.round(camera.r[:, 1]) >= 0, np.round(camera.r[:, 1]) < frustum['height']),
89
+ )
90
+ in_mask = np.zeros(camera.shape[0], dtype=np.bool)
91
+ idx = np.round(proj[in_viewport][:, [1, 0]].T).astype(np.int).tolist()
92
+ in_mask[in_viewport] = mask[idx]
93
+
94
+ faces_in_mask = np.where(np.min(in_mask[base_smpl.f], axis=1))[0]
95
+ visible_faces = np.intersect1d(faces_in_mask, visibility[visible])
96
+
97
+ # get the current unwrap
98
+ part_tex = iso.render(frame / 255., camera, visible_faces)
99
+
100
+ # angle under which the texels have been seen
101
+ points = np.hstack((proj, np.ones((proj.shape[0], 1))))
102
+ points3d = camera.unproject_points(points)
103
+ points3d /= np.linalg.norm(points3d, axis=1).reshape(-1, 1)
104
+ alpha = np.sum(points3d * -vn.r, axis=1).reshape(-1, 1)
105
+ alpha[alpha < 0] = 0
106
+ iso_normals = iso_vis.render(alpha)[:, :, 0]
107
+ iso_normals[np.all(part_tex == bgcolor, axis=2)] = 0
108
+
109
+ # texels to consider
110
+ part_mask = np.zeros((resolution, resolution))
111
+ min_normal = np.min(normal_agg, axis=2)
112
+ part_mask[iso_normals > min_normal] = 1.
113
+
114
+ # update best seen texels
115
+ where = np.argmax(np.atleast_3d(iso_normals) - normal_agg, axis=2)
116
+
117
+ idx = np.dstack((static_indices[0], static_indices[1], where))[part_mask == 1]
118
+ tex_agg[list(idx[:, 0]), list(idx[:, 1]), list(idx[:, 2])] = part_tex[part_mask == 1]
119
+ normal_agg[list(idx[:, 0]), list(idx[:, 1]), list(idx[:, 2])] = iso_normals[part_mask == 1]
120
+
121
+ if display:
122
+ im.show(part_tex, id='part_tex', waittime=1)
123
+
124
+ else:
125
+ cap.grab()
126
+
127
+ i += 1
128
+
129
+ # merge textures
130
+ log.info('Computing median texture...')
131
+ tex_median = np.nanmedian(tex_agg, axis=2)
132
+
133
+ log.info('Inpainting unseen areas...')
134
+ where = np.max(normal_agg, axis=2) > 0.2
135
+
136
+ tex_mask = iso.iso_mask
137
+ mask_final = np.float32(where)
138
+
139
+ kernel_size = np.int(resolution * 0.02)
140
+ kernel = np.ones((kernel_size, kernel_size), np.uint8)
141
+ inpaint_area = cv2.dilate(tex_mask, kernel) - mask_final
142
+
143
+ tex_final = cv2.inpaint(np.uint8(tex_median * 255), np.uint8(inpaint_area * 255), 3, cv2.INPAINT_TELEA)
144
+
145
+ cv2.imwrite(out, tex_final)
146
+ log.info('Done.')
147
+
148
+
149
+ if __name__ == '__main__':
150
+ parser = argparse.ArgumentParser()
151
+
152
+ parser.add_argument(
153
+ 'consensus',
154
+ type=str,
155
+ help="pkl file that contains consensus")
156
+ parser.add_argument(
157
+ 'camera',
158
+ type=str,
159
+ help="pkl file that contains camera settings")
160
+ parser.add_argument(
161
+ 'video',
162
+ type=str,
163
+ help="Input video")
164
+ parser.add_argument(
165
+ 'pose_file',
166
+ type=str,
167
+ help="File that contains poses")
168
+ parser.add_argument(
169
+ 'masks_file',
170
+ type=str,
171
+ help="File that contains segmentations")
172
+ parser.add_argument(
173
+ 'out',
174
+ type=str,
175
+ help="Out file path")
176
+ parser.add_argument(
177
+ '--model', '-m',
178
+ default='vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl',
179
+ help='Path to SMPL model')
180
+ parser.add_argument(
181
+ '--resolution', '-r', default=1000, type=int,
182
+ help="Output resolution")
183
+ parser.add_argument(
184
+ '--num', '-n', default=120, type=int,
185
+ help="Number of used frames")
186
+ parser.add_argument(
187
+ '--first_frame', '-f', default=0, type=int,
188
+ help="First frame to use")
189
+ parser.add_argument(
190
+ '--last_frame', '-l', default=2000, type=int,
191
+ help="Last frame to use")
192
+ parser.add_argument(
193
+ '--display', '-d',
194
+ action='store_true',
195
+ help="Enable visualization")
196
+
197
+ args = parser.parse_args()
198
+
199
+ main(args.consensus, args.camera, args.video, args.pose_file, args.masks_file, args.out, args.model,
200
+ args.resolution, args.num, args.first_frame, args.last_frame, args.display)
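For each texel, the loop above keeps the colour observed under the most frontal view (largest cosine between the viewing ray and the surface normal) and finally takes the per-texel median over those best observations. A small, self-contained numpy sketch of that selection rule with made-up data:

import numpy as np

res, slots = 2, 3                                   # toy 2x2 texture, 3 aggregation slots
tex_agg = np.full((res, res, slots, 3), np.nan)     # best colours seen so far
normal_agg = np.ones((res, res, slots)) * 0.2       # their viewing cosines (0.2 = unseen)

part_tex = np.random.rand(res, res, 3)              # unwrap of the current frame
iso_normals = np.random.rand(res, res)              # viewing cosine per texel

static_indices = np.indices((res, res))
part_mask = (iso_normals > np.min(normal_agg, axis=2)).astype(np.float64)
where = np.argmax(np.atleast_3d(iso_normals) - normal_agg, axis=2)

idx = np.dstack((static_indices[0], static_indices[1], where))[part_mask == 1]
tex_agg[list(idx[:, 0]), list(idx[:, 1]), list(idx[:, 2])] = part_tex[part_mask == 1]
normal_agg[list(idx[:, 0]), list(idx[:, 1]), list(idx[:, 2])] = iso_normals[part_mask == 1]

tex_median = np.nanmedian(tex_agg, axis=2)          # final per-texel colour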
videoavatars/tex/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
videoavatars/tex/iso.py ADDED
@@ -0,0 +1,57 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import numpy as np
5
+
6
+ from render.renderer import OrthoTexturedRenderer, OrthoColoredRenderer
7
+ from render.camera import OrthoProjectPoints
8
+
9
+
10
+ class Isomapper():
11
+ def __init__(self, vt, ft, f, tex_res, bgcolor=np.zeros(3)):
12
+ vt3d = np.dstack((vt[:, 0] - 0.5, 1 - vt[:, 1] - 0.5, np.zeros(vt.shape[0])))[0]
13
+ ortho = OrthoProjectPoints(rt=np.zeros(3), t=np.zeros(3), near=-1, far=1, left=-0.5, right=0.5, bottom=-0.5,
14
+ top=0.5, width=tex_res, height=tex_res)
15
+ self.tex_res = tex_res
16
+ self.f = ft
17
+ self.ft = f
18
+ self.rn_tex = OrthoTexturedRenderer(v=vt3d, f=ft, ortho=ortho, vc=np.ones_like(vt3d), bgcolor=bgcolor)
19
+ self.rn_vis = OrthoColoredRenderer(v=vt3d, f=ft, ortho=ortho, vc=np.ones_like(vt3d), bgcolor=np.zeros(3),
20
+ num_channels=1)
21
+ self.bgcolor = bgcolor
22
+ self.iso_mask = np.array(self.rn_vis.r)
23
+
24
+ def render(self, frame, proj_v, visible_faces=None):
25
+ h, w, _ = np.atleast_3d(frame).shape
26
+ v2d = proj_v.r
27
+ v2d_as_vt = np.dstack((v2d[:, 0] / w, 1 - v2d[:, 1] / h))[0]
28
+
29
+ self.rn_tex.set(texture_image=frame, vt=v2d_as_vt, ft=self.ft)
30
+ tex = np.array(self.rn_tex.r)
31
+
32
+ if visible_faces is not None:
33
+ self.rn_vis.set(f=self.f[visible_faces])
34
+ mask = np.atleast_3d(self.rn_vis.r)
35
+ tex = mask * tex + (1 - mask) * self.bgcolor
36
+
37
+ return tex
38
+
39
+
40
+ class IsoColoredRenderer:
41
+ def __init__(self, vt, ft, f, tex_res):
42
+ ortho = OrthoProjectPoints(rt=np.zeros(3), t=np.zeros(3), near=-1, far=1, left=-0.5, right=0.5, bottom=-0.5,
43
+ top=0.5, width=tex_res, height=tex_res)
44
+ vt3d = np.dstack((vt[:, 0] - 0.5, 1 - vt[:, 1] - 0.5, np.zeros(vt.shape[0])))[0]
45
+ vt3d = vt3d[ft].reshape(-1, 3)
46
+ self.f = f
47
+ self.rn = OrthoColoredRenderer(bgcolor=np.zeros(3), ortho=ortho, v=vt3d, f=np.arange(ft.size).reshape(-1, 3))
48
+
49
+ def render(self, vc):
50
+ vc = np.atleast_3d(vc)
51
+
52
+ if vc.shape[2] == 1:
53
+ vc = np.hstack((vc, vc, vc))
54
+
55
+ self.rn.set(vc=vc[self.f].reshape(-1, 3))
56
+
57
+ return np.array(self.rn.r)
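Both classes above render in UV space: texture coordinates are lifted to a flat 3D mesh (z = 0, centred on the origin) and viewed with an orthographic camera spanning the unit square, so the output image is the texture atlas itself. A minimal sketch of constructing them from the UV assets shipped with the repository, as step3_texture.py does; paths and resolution follow that script:

import numpy as np
import cPickle as pkl
from models.smpl import Smpl
from tex.iso import Isomapper, IsoColoredRenderer

with open('vendor/smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl', 'rb') as fp:
    base_smpl = Smpl(pkl.load(fp))

vt = np.load('assets/basicModel_vt.npy')   # UV coordinates
ft = np.load('assets/basicModel_ft.npy')   # per-face indices into vt
resolution = 1000

iso = Isomapper(vt, ft, base_smpl.f, resolution, bgcolor=np.array([1., 0.2, 1.]))
iso_vis = IsoColoredRenderer(vt, ft, base_smpl.f, resolution)
# iso.render(frame / 255., camera, visible_faces) unwraps one video frame into the atlas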
videoavatars/util/__init__.py ADDED
@@ -0,0 +1,2 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
videoavatars/util/__init__.pyc ADDED
Binary file (163 Bytes).
 
videoavatars/util/im.py ADDED
@@ -0,0 +1,43 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import numpy as np
5
+ import matplotlib.pyplot as plt
6
+
7
+
8
+ _figures = {}
9
+
10
+
11
+ def show(im, waittime=0, id='plt', max_width=600):
12
+ plt.ion()
13
+ w = min(im.shape[1], max_width)
14
+ h = max_width * (1.0 * im.shape[0]) / im.shape[1] if w == max_width else im.shape[0]
15
+ plt.figure(id, figsize=(w / 80, h / 80), dpi=80)
16
+
17
+ ax = plt.axes([0, 0, 1, 1], frameon=False)
18
+ ax.get_xaxis().set_visible(False)
19
+ ax.get_yaxis().set_visible(False)
20
+
21
+ if np.issubdtype(im.dtype, np.floating):
22
+ if np.max(im) > 1:
23
+ factor = 255 / np.max(im)
24
+ else:
25
+ factor = 255
26
+ else:
27
+ factor = 1
28
+
29
+ if np.atleast_3d(im).shape[2] == 3:
30
+ data = np.uint8(im * factor)[:, :, ::-1]
31
+ else:
32
+ data = np.uint8(np.dstack((im, im, im)) * factor)
33
+
34
+ if id in _figures and plt.fignum_exists(id):
35
+ _figures[id].set_array(data)
36
+ else:
37
+ _figures[id] = plt.imshow(data)
38
+
39
+ if waittime == 0:
40
+ plt.waitforbuttonpress()
41
+ else:
42
+ plt.pause(waittime / 1000.)
43
+ plt.ioff()
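A minimal usage sketch for show(): float images are rescaled to 0-255, three-channel input is assumed to be BGR (it is flipped for matplotlib), and the same figure is reused per id, which is how the fitting callbacks above display silhouette overlays without blocking:

import numpy as np
from util import im

silh_diff = np.random.rand(480, 640)   # placeholder for (rn.r - mask + 1) / 2.
im.show(silh_diff, waittime=1)         # update the window and continue after ~1 ms
im.show(silh_diff, waittime=0)         # waittime=0 blocks until a button press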
videoavatars/util/im.pyc ADDED
Binary file (1.47 kB).
 
videoavatars/util/logger.py ADDED
@@ -0,0 +1,12 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import logging
5
+
6
+ __all__ = ['log']
7
+
8
+
9
+ FORMAT = "%(asctime)s [%(filename)s:%(funcName)s:%(lineno)d] %(message)s"
10
+
11
+ log = logging.getLogger(__name__)
12
+ logging.basicConfig(level=logging.INFO, format=FORMAT)
videoavatars/util/logger.pyc ADDED
Binary file (429 Bytes).
 
videoavatars/util/mesh.py ADDED
@@ -0,0 +1,40 @@
1
+ #!/usr/bin/env python2
2
+ # -*- coding: utf-8 -*-
3
+
4
+ import os
5
+ import numpy as np
6
+
7
+
8
+ def write(filename, v, f, vt=None, ft=None, vn=None, vc=None, texture=None):
9
+ with open(filename, 'w') as fp:
10
+ if texture is not None:
11
+ mat_file = filename.replace('obj', 'mtl')
12
+
13
+ fp.write('mtllib {}\n'.format(os.path.basename(mat_file)))
14
+ fp.write('usemtl mat\n')
15
+
16
+ with open(mat_file, 'w') as mfp:
17
+ mfp.write('newmtl mat\n')
18
+ mfp.write('Ka 1.0 1.0 1.0\n')
19
+ mfp.write('Kd 1.0 1.0 1.0\n')
20
+ mfp.write('Ks 0.0 0.0 0.0\n')
21
+ mfp.write('d 1.0\n')
22
+ mfp.write('Ns 0.0\n')
23
+ mfp.write('illum 0\n')
24
+ mfp.write('map_Kd {}\n'.format(texture))
25
+
26
+ if vc is not None:
27
+ fp.write(('v {:f} {:f} {:f} {:f} {:f} {:f}\n' * len(v)).format(*np.hstack((v, vc)).reshape(-1)))
28
+ else:
29
+ fp.write(('v {:f} {:f} {:f}\n' * len(v)).format(*v.reshape(-1)))
30
+
31
+ if vn is not None:
32
+ fp.write(('vn {:f} {:f} {:f}\n' * len(vn)).format(*vn.reshape(-1)))
33
+
34
+ if vt is not None:
35
+ fp.write(('vt {:f} {:f}\n' * len(vt)).format(*vt.reshape(-1)))
36
+
37
+ if ft is not None:
38
+ fp.write(('f {:d}/{:d}/{:d} {:d}/{:d}/{:d} {:d}/{:d}/{:d}\n' * len(f)).format(*np.hstack((f.reshape(-1, 1), ft.reshape(-1, 1), f.reshape(-1, 1))).reshape(-1) + 1))
39
+ else:
40
+ fp.write(('f {:d}//{:d} {:d}//{:d} {:d}//{:d}\n' * len(f)).format(*np.repeat(f.reshape(-1) + 1, 2)))
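A minimal, self-contained sketch of write(): vertex positions and faces are required, texture coordinates and a texture map are optional, and indices are written 1-based per the OBJ convention (step2_consensus.py uses the same call with the basicModel UV assets to export the consensus mesh):

import numpy as np
from util import mesh

v = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])   # three vertices
f = np.array([[0, 1, 2]])                                   # one triangle (0-based here)
vt = np.array([[0., 0.], [1., 0.], [0., 1.]])               # texture coordinates
ft = np.array([[0, 1, 2]])                                   # per-face UV indices

# 'triangle.obj' and 'texture.png' are placeholder names; a triangle.mtl is written alongside
mesh.write('triangle.obj', v, f, vt=vt, ft=ft, texture='texture.png')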