quocanh34 committed on
Commit
af16720
1 Parent(s): 58dbd55

Upload utils_mask.py

Files changed (1)
  1. utils_mask.py +498 -0
utils_mask.py ADDED
@@ -0,0 +1,498 @@
+ import numpy as np
+ import cv2
+ from PIL import Image, ImageDraw, ImageOps
+ import torch
+ import matplotlib.pyplot as plt
+
+ import apply_net
+ from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation
+
+ # label indices produced by the human-parsing model
+ label_map = {
+     "background": 0,
+     "hat": 1,
+     "hair": 2,
+     "sunglasses": 3,
+     "upper_clothes": 4,
+     "skirt": 5,
+     "pants": 6,
+     "dress": 7,
+     "belt": 8,
+     "left_shoe": 9,
+     "right_shoe": 10,
+     "head": 11,
+     "left_leg": 12,
+     "right_leg": 13,
+     "left_arm": 14,
+     "right_arm": 15,
+     "bag": 16,
+     "scarf": 17,
+ }
+
+ # DensePose fine-segmentation part indices, grouped by body part
+ dense_map = {
+     "background": [0],
+     "torso": [1, 2],
+     "right_hand": [3],
+     "left_hand": [4],
+     "left_foot": [5],
+     "right_foot": [6],
+     "upper_leg_right": [7, 9],
+     "upper_leg_left": [8, 10],
+     "lower_leg_right": [11, 13],
+     "lower_leg_left": [12, 14],
+     "upper_arm_left": [15, 17],
+     "upper_arm_right": [16, 18],
+     "lower_arm_left": [19, 21],
+     "lower_arm_right": [20, 22],
+     "head": [23, 24],
+ }
+
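+ # Move the wrist keypoint further from the elbow along the elbow->wrist direction
+ # (scale > 1 extends the drawn arm line slightly past the wrist).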
+ def extend_arm_mask(wrist, elbow, scale):
+     wrist = elbow + scale * (wrist - elbow)
+     return wrist
+
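+ # Fill interior holes of a binary mask: flood-fill from the border, invert,
+ # and OR the result with the original mask.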
+ def hole_fill(img):
+     img = np.pad(img[1:-1, 1:-1], pad_width=1, mode='constant', constant_values=0)
+     img_copy = img.copy()
+     mask = np.zeros((img.shape[0] + 2, img.shape[1] + 2), dtype=np.uint8)
+
+     cv2.floodFill(img, mask, (0, 0), 255)
+     img_inverse = cv2.bitwise_not(img)
+     dst = cv2.bitwise_or(img_copy, img_inverse)
+     return dst
+
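+ # Keep only the largest contour of a binary mask, drawn filled.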
+ def refine_mask(mask):
+     contours, hierarchy = cv2.findContours(mask.astype(np.uint8),
+                                            cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_L1)
+     area = []
+     for j in range(len(contours)):
+         a_d = cv2.contourArea(contours[j], True)
+         area.append(abs(a_d))
+     refine_mask = np.zeros_like(mask).astype(np.uint8)
+     if len(area) != 0:
+         i = area.index(max(area))
+         cv2.drawContours(refine_mask, contours, i, color=255, thickness=-1)
+
+     return refine_mask
+
+ def get_mask_location_new(category, model_parse: Image.Image, keypoint: dict, width=384, height=512, dense_pose=None):
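+     """Build the inpainting mask for `category` from the human-parsing map,
+     the OpenPose keypoints and the DensePose index map.
+
+     `category` must be one of 'upper_clothes', 'lower_body_pants',
+     'lower_body_skirts', 'dresses', 'full_body', 'lower_body_shoes' or
+     'lower_body_boots'.
+
+     Returns (mask, mask_gray, mask_array): the binary inpaint mask as a PIL
+     image, the same mask with value 127, and the underlying numpy array.
+     """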
+     if category not in ('lower_body_shoes', 'lower_body_boots', 'full_body', 'dresses',
+                         'upper_clothes', 'lower_body_pants', 'lower_body_skirts'):
+         raise ValueError("Category not found")
+
+     # masks for lower_body_shoes and lower_body_boots come straight from DensePose
+     if category == 'lower_body_shoes':
+         dense_mask = np.zeros((height, width))
+         dense_mask += (dense_pose == 5).astype(np.float32) + \
+                       (dense_pose == 6).astype(np.float32)
+
+         dense_mask = cv2.dilate(dense_mask, np.ones((5, 5), np.uint16), iterations=5)
+
+         mask = Image.fromarray(dense_mask.astype(np.uint8) * 255)
+         mask_gray = Image.fromarray(dense_mask.astype(np.uint8) * 127)
+
+         return mask, mask_gray, dense_mask
+
+     if category == 'lower_body_boots':
+         dense_mask = np.zeros((height, width))
+
+         dense_mask += (dense_pose == 5).astype(np.float32) + \
+                       (dense_pose == 6).astype(np.float32) + \
+                       (dense_pose == 11).astype(np.float32) + \
+                       (dense_pose == 12).astype(np.float32) + \
+                       (dense_pose == 13).astype(np.float32) + \
+                       (dense_pose == 14).astype(np.float32)
+
+         dense_mask = cv2.dilate(dense_mask, np.ones((5, 5), np.uint16), iterations=5)
+
+         mask = Image.fromarray(dense_mask.astype(np.uint8) * 255)
+         mask_gray = Image.fromarray(dense_mask.astype(np.uint8) * 127)
+
+         return mask, mask_gray, dense_mask
+
+     # masks for the remaining categories, built from the human-parsing map
+     im_parse = model_parse.resize((width, height), Image.NEAREST)
+     parse_array = np.array(im_parse)
+
+     arm_width = 40
+
+     parse_head = (parse_array == 1).astype(np.float32) + \
+                  (parse_array == 3).astype(np.float32) + \
+                  (parse_array == 11).astype(np.float32)
+
+     parser_mask_fixed = (parse_array == label_map["left_shoe"]).astype(np.float32) + \
+                         (parse_array == label_map["right_shoe"]).astype(np.float32) + \
+                         (parse_array == label_map["hat"]).astype(np.float32) + \
+                         (parse_array == label_map["sunglasses"]).astype(np.float32) + \
+                         (parse_array == label_map["bag"]).astype(np.float32)
+
+     parser_mask_changeable = (parse_array == label_map["background"]).astype(np.float32)
+
+     arms_left = (parse_array == 14).astype(np.float32)
+     arms_right = (parse_array == 15).astype(np.float32)
+     arms = arms_left + arms_right
+
+     if category == 'dresses' or category == 'full_body':  # upper_clothes + lower_body_skirts
+         parse_mask = (parse_array == 7).astype(np.float32) + \
+                      (parse_array == 4).astype(np.float32) + \
+                      (parse_array == 5).astype(np.float32) + \
+                      (parse_array == 6).astype(np.float32)
+
+         parser_mask_changeable += np.logical_and(parse_array, np.logical_not(parser_mask_fixed))
+
+     elif category == 'upper_clothes':  # -> upper_clothes
+         parse_mask = (parse_array == 4).astype(np.float32)
+         parser_mask_fixed_lower_cloth = (parse_array == label_map["skirt"]).astype(np.float32) + \
+                                         (parse_array == label_map["pants"]).astype(np.float32)
+         # parser_mask_fixed += parser_mask_fixed_lower_cloth
+         parser_mask_changeable += np.logical_and(parse_array, np.logical_not(parser_mask_fixed))
+     elif category == 'lower_body_pants' or category == 'lower_body_skirts':  # -> remove
+         parse_mask = (parse_array == 6).astype(np.float32) + \
+                      (parse_array == 12).astype(np.float32) + \
+                      (parse_array == 13).astype(np.float32) + \
+                      (parse_array == 5).astype(np.float32)
+         parser_mask_fixed += (parse_array == label_map["upper_clothes"]).astype(np.float32) + \
+                              (parse_array == 14).astype(np.float32) + \
+                              (parse_array == 15).astype(np.float32)
+         parser_mask_changeable += np.logical_and(parse_array, np.logical_not(parser_mask_fixed))
+     else:
+         raise NotImplementedError
+
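+     # pose_keypoints_2d is assumed to follow the 18-point OpenPose/COCO ordering:
+     # 2/5 shoulders, 3/6 elbows, 4/7 wrists, 8/11 hips, 9/12 knees, 10/13 ankles (right/left)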
+     # Load pose points
+     pose_data = keypoint["pose_keypoints_2d"]
+     pose_data = np.array(pose_data)
+     pose_data = pose_data.reshape((-1, 2))
+
+     im_arms_left = Image.new('L', (width, height))
+     im_arms_right = Image.new('L', (width, height))
+     arms_draw_left = ImageDraw.Draw(im_arms_left)
+     arms_draw_right = ImageDraw.Draw(im_arms_right)
+     if category == 'dresses' or category == 'upper_clothes' or category == 'full_body':
+         shoulder_right = np.multiply(tuple(pose_data[2][:2]), height / 512.0)
+         shoulder_left = np.multiply(tuple(pose_data[5][:2]), height / 512.0)
+         elbow_right = np.multiply(tuple(pose_data[3][:2]), height / 512.0)
+         elbow_left = np.multiply(tuple(pose_data[6][:2]), height / 512.0)
+         wrist_right = np.multiply(tuple(pose_data[4][:2]), height / 512.0)
+         wrist_left = np.multiply(tuple(pose_data[7][:2]), height / 512.0)
+         ARM_LINE_WIDTH = int(arm_width / 512 * height)
+         size_left = [shoulder_left[0] - ARM_LINE_WIDTH // 2, shoulder_left[1] - ARM_LINE_WIDTH // 2,
+                      shoulder_left[0] + ARM_LINE_WIDTH // 2, shoulder_left[1] + ARM_LINE_WIDTH // 2]
+         size_right = [shoulder_right[0] - ARM_LINE_WIDTH // 2, shoulder_right[1] - ARM_LINE_WIDTH // 2,
+                       shoulder_right[0] + ARM_LINE_WIDTH // 2, shoulder_right[1] + ARM_LINE_WIDTH // 2]
+
+         if wrist_right[0] <= 1. and wrist_right[1] <= 1.:
+             im_arms_right = arms_right
+         else:
+             wrist_right = extend_arm_mask(wrist_right, elbow_right, 1.2)
+             arms_draw_right.line(np.concatenate((shoulder_right, elbow_right, wrist_right)).astype(np.uint16).tolist(), 'white', ARM_LINE_WIDTH, 'curve')
+             arms_draw_right.arc(size_right, 0, 360, 'white', ARM_LINE_WIDTH // 2)
+
+         if wrist_left[0] <= 1. and wrist_left[1] <= 1.:
+             im_arms_left = arms_left
+         else:
+             wrist_left = extend_arm_mask(wrist_left, elbow_left, 1.2)
+             arms_draw_left.line(np.concatenate((wrist_left, elbow_left, shoulder_left)).astype(np.uint16).tolist(), 'white', ARM_LINE_WIDTH, 'curve')
+             arms_draw_left.arc(size_left, 0, 360, 'white', ARM_LINE_WIDTH // 2)
+
+         hands_left = np.logical_and(np.logical_not(im_arms_left), arms_left)
+         hands_right = np.logical_and(np.logical_not(im_arms_right), arms_right)
+         parser_mask_fixed += hands_left + hands_right
+
+     parser_mask_fixed = np.logical_or(parser_mask_fixed, parse_head)
+     parse_mask = cv2.dilate(parse_mask, np.ones((5, 5), np.uint16), iterations=5)
+     if category == 'dresses' or category == 'upper_clothes' or category == 'full_body':
+         neck_mask = (parse_array == 18).astype(np.float32)
+         neck_mask = cv2.dilate(neck_mask, np.ones((5, 5), np.uint16), iterations=1)
+         neck_mask = np.logical_and(neck_mask, np.logical_not(parse_head))
+         parse_mask = np.logical_or(parse_mask, neck_mask)
+         arm_mask = cv2.dilate(np.logical_or(im_arms_left, im_arms_right).astype('float32'), np.ones((5, 5), np.uint16), iterations=4)
+         parse_mask += np.logical_or(parse_mask, arm_mask)
+
+     # parse_mask_img = Image.fromarray(parse_mask.astype(np.uint8) * 255)
+     # parse_mask_img.save("mask_their_pre.png")
+
+     # parser_mask_changeable_img = Image.fromarray(parse_mask.astype(np.uint8) * 255)
+     # parser_mask_changeable_img.save("mask_change.png")
+
+     parse_mask = np.logical_and(parser_mask_changeable, np.logical_not(parse_mask))
+
+     # convert parse_mask to image and save
+     # parse_mask_img = Image.fromarray(parse_mask.astype(np.uint8) * 255)
+     # parse_mask_img.save("mask_their.png")
+
+     # my code
+
+     # get pose points for hips, knees and ankles
+     hip_right = np.multiply(tuple(pose_data[8][:2]), height / 512.0)
+     hip_left = np.multiply(tuple(pose_data[11][:2]), height / 512.0)
+     knee_right = np.multiply(tuple(pose_data[9][:2]), height / 512.0)
+     knee_left = np.multiply(tuple(pose_data[12][:2]), height / 512.0)
+     ankle_right = np.multiply(tuple(pose_data[10][:2]), height / 512.0)
+     ankle_left = np.multiply(tuple(pose_data[13][:2]), height / 512.0)
+
+     # points one fifth of the way from hip to knee, used as the lower boundary of the upper-clothes mask
+     mid_point_left = hip_left + (knee_left - hip_left) / 5
+     mid_point_right = hip_right + (knee_right - hip_right) / 5
+
+     extra_mask = Image.new('L', (width, height))
+     extra_draw = ImageDraw.Draw(extra_mask)
+
+     # masks for the dresses / skirts / pants categories
+     if category == 'dresses' or category == 'lower_body_skirts' or category == 'lower_body_pants':
+
+         # draw lines through the 6 leg keypoints (hips, knees, ankles)
+         if ankle_left[0] != 0 and ankle_right[0] != 0 and ankle_left[1] != 0 and ankle_right[1] != 0:
+             extra_draw.line(np.concatenate((ankle_right, ankle_left)).astype(np.uint16).tolist(), 'white', 1, 'curve')
+             extra_draw.line(np.concatenate((hip_right, knee_right, ankle_right)).astype(np.uint16).tolist(), 'white', arm_width + 20, 'curve')
+             extra_draw.line(np.concatenate((hip_left, knee_left, ankle_left)).astype(np.uint16).tolist(), 'white', arm_width + 20, 'curve')
+             extra_draw.line(np.concatenate((hip_right, hip_left)).astype(np.uint16).tolist(), 'white', 1, 'curve')
+             extra_draw.line(np.concatenate((knee_right, knee_left)).astype(np.uint16).tolist(), 'white', 1, 'curve')
+
+         elif knee_left[0] != 0 and knee_right[0] != 0 and knee_left[1] != 0 and knee_right[1] != 0:
+             extra_draw.line(np.concatenate((hip_right, knee_right)).astype(np.uint16).tolist(), 'white', 1, 'curve')
+             extra_draw.line(np.concatenate((hip_left, knee_left)).astype(np.uint16).tolist(), 'white', arm_width, 'curve')
+             extra_draw.line(np.concatenate((hip_right, hip_left)).astype(np.uint16).tolist(), 'white', arm_width, 'curve')
+         else:
+             pass
+
+         if category == 'lower_body_pants':
+             extra_mask = hole_fill(np.array(extra_mask))
+             extra_mask = cv2.dilate(np.array(extra_mask), np.ones((5, 5), np.uint16), iterations=int((knee_right[1] - hip_right[1]) / 10))
+
+             # restrict the drawn leg mask to the DensePose torso / upper-leg region
+             dense = (dense_pose == 1).astype(np.float32) + \
+                     (dense_pose == 2).astype(np.float32) + \
+                     (dense_pose == 7).astype(np.float32) + \
+                     (dense_pose == 8).astype(np.float32) + \
+                     (dense_pose == 9).astype(np.float32) + \
+                     (dense_pose == 10).astype(np.float32)
+             extra_mask = np.logical_and(extra_mask, dense)
+             extra_mask = cv2.dilate((extra_mask * 255).astype(np.uint8), np.ones((5, 5), np.uint16), iterations=5)
+             extra_mask = Image.fromarray(extra_mask.astype(np.uint8), 'L')  # already scaled to 0/255 above
+
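+     # For upper clothes, the branch below draws a thick band from the hips down to
+     # the points computed above; after hole filling and inversion it is ANDed with
+     # the preserve mask, so the band always falls inside the inpainting region.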
+     # mask for upper_clothes
+     if category == "upper_clothes":
+         if knee_left[0] != 0 and knee_right[0] != 0 and knee_left[1] != 0 and knee_right[1] != 0:
+             extra_draw.line(np.concatenate((hip_right, hip_left)).astype(np.uint16).tolist(), 'white', 1, 'curve')
+             extra_draw.line(np.concatenate((mid_point_right, mid_point_left)).astype(np.uint16).tolist(), 'white', 1, 'curve')
+             extra_draw.line(np.concatenate((hip_right, mid_point_right)).astype(np.uint16).tolist(), 'white', 40, 'curve')
+             extra_draw.line(np.concatenate((hip_left, mid_point_left)).astype(np.uint16).tolist(), 'white', 40, 'curve')
+         else:
+             pass
+     extra_mask = cv2.dilate(np.array(extra_mask), np.ones((5, 5), np.uint16), iterations=4)
+
+     extra_mask = Image.fromarray(hole_fill(np.array(extra_mask)))
+
+     extra_mask = ImageOps.invert(extra_mask)
+     extra_mask.save("mask_mine.png")
+
+     if category == 'lower_body_pants':
+         parse_mask = np.logical_or(parse_mask, parser_mask_fixed)
+         parse_mask = np.logical_and(parse_mask, extra_mask)
+     else:
+         parse_mask = np.logical_and(parse_mask, extra_mask)
+         parse_mask = np.logical_or(parse_mask, parser_mask_fixed)
+
+     parse_mask_img = Image.fromarray(parse_mask.astype(np.uint8) * 255)
+     parse_mask_img.save("mask_all.png")
+
+     inpaint_mask = 1 - parse_mask
+
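+     # DensePose-driven mask: add the body parts relevant to the category on top of
+     # the parse/pose mask built above; hands and feet that are not covered by the
+     # person's own clothes stay fixed and are removed from the final mask.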
+     # densepose
+     if dense_pose is not None:
+
+         dense_mask = np.zeros((height, width))
+         dense_fixed = np.zeros((height, width))
+
+         dense_foot = (dense_pose == 5).astype(np.float32) + \
+                      (dense_pose == 6).astype(np.float32)
+
+         dense_hand = (dense_pose == 3).astype(np.float32) + \
+                      (dense_pose == 4).astype(np.float32)
+
+         dense_fixed = dense_foot + dense_hand
+
+         # resolve the user's own clothes covering the hands/feet: those pixels should not stay fixed
+         up_clothes = (parse_array == 4).astype(np.float32)
+         low_clothes = (parse_array == 6).astype(np.float32) + \
+                       (parse_array == 5).astype(np.float32) + \
+                       (parse_array == 7).astype(np.float32)
+         up_clothes = cv2.dilate(up_clothes, np.ones((5, 5), np.uint16), iterations=3)
+         low_clothes = cv2.dilate(low_clothes, np.ones((5, 5), np.uint16), iterations=3)
+
+         dense_fixed = np.logical_and(dense_fixed, np.logical_not(up_clothes))
+         dense_fixed = np.logical_and(dense_fixed, np.logical_not(low_clothes))
+         dense_fixed = dense_fixed.astype(np.float32)
+
+         # masking for upper_clothes and lower_body
+         if category == 'upper_clothes' or category == 'full_body' or category == 'dresses':  # torso (1, 2) and arms (15-22)
+             dense_mask += (dense_pose == 1).astype(np.float32) + \
+                           (dense_pose == 2).astype(np.float32) + \
+                           (dense_pose == 15).astype(np.float32) + \
+                           (dense_pose == 16).astype(np.float32) + \
+                           (dense_pose == 17).astype(np.float32) + \
+                           (dense_pose == 18).astype(np.float32) + \
+                           (dense_pose == 19).astype(np.float32) + \
+                           (dense_pose == 20).astype(np.float32) + \
+                           (dense_pose == 21).astype(np.float32) + \
+                           (dense_pose == 22).astype(np.float32)
+         if category == 'lower_body_pants' or category == 'lower_body_skirts' or category == 'full_body' or category == 'dresses':  # legs (7-14)
+             dense_mask += (dense_pose == 7).astype(np.float32) + \
+                           (dense_pose == 8).astype(np.float32) + \
+                           (dense_pose == 9).astype(np.float32) + \
+                           (dense_pose == 10).astype(np.float32) + \
+                           (dense_pose == 11).astype(np.float32) + \
+                           (dense_pose == 12).astype(np.float32) + \
+                           (dense_pose == 13).astype(np.float32) + \
+                           (dense_pose == 14).astype(np.float32)
+
+         # if category == 'lower_body_pants' or category == 'lower_body_skirts':
+         #     dense_fixed += (dense_pose == 15).astype(np.float32) + \
+         #                    (dense_pose == 16).astype(np.float32) + \
+         #                    (dense_pose == 17).astype(np.float32) + \
+         #                    (dense_pose == 18).astype(np.float32) + \
+         #                    (dense_pose == 19).astype(np.float32) + \
+         #                    (dense_pose == 20).astype(np.float32) + \
+         #                    (dense_pose == 21).astype(np.float32) + \
+         #                    (dense_pose == 22).astype(np.float32)
+         #     dense_fixed = cv2.dilate(dense_fixed, np.ones((5, 5), np.uint16), iterations=1)
+
+         if category == 'lower_body_skirts' or category == 'dresses':
+             # mask the region between the two legs
+             extra_mask = ImageOps.invert(extra_mask)
+             extra_mask = np.array(extra_mask)
+             extra_mask = cv2.dilate(extra_mask, np.ones((5, 5), np.uint16), iterations=9)
+             dense_mask = np.logical_or(dense_mask, extra_mask)
+             dense_mask = dense_mask.astype(np.float32)
+
+         if category == "lower_body_pants":
+             extra_dense_mask = cv2.dilate(dense_mask, np.ones((5, 5), np.uint16), iterations=5)
+             background_mask = (dense_pose == 0).astype(np.float32)
+             extra_dense_mask = np.logical_and(extra_dense_mask, np.logical_not(background_mask))
+
+             dense_mask = np.logical_or(dense_mask, extra_dense_mask)
+             dense_mask = dense_mask.astype(np.float32)
+
+         # grow the mask
+         kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * 10 + 1, 2 * 10 + 1))
+         dense_mask = cv2.dilate(dense_mask, kernel, iterations=1)
+
+         dense_mask_img = Image.fromarray(dense_mask.astype(np.uint8) * 255)
+         dense_mask_img.save("mask_new.png")
+
+         # refine for upper_clothes: drop the DensePose mask below the mid-point line computed above
+         if category == 'upper_clothes':
+             mid_y = max(mid_point_left[1], mid_point_right[1])
+             y_grid = np.arange(dense_mask.shape[0]).reshape(-1, 1)
+             lower_half_mask = y_grid > mid_y
+             lower_half_mask = np.tile(lower_half_mask, (1, dense_mask.shape[1]))
+             dense_mask[lower_half_mask] = 0
+
+         inpaint_mask = np.logical_or(inpaint_mask, dense_mask)
+
+     img = np.where(inpaint_mask, 255, 0)
+     dst = hole_fill(img.astype(np.uint8))
+
+     # inpaint_mask = dst / 255 * 1
+     # inpaint_mask_img = Image.fromarray(inpaint_mask.astype(np.uint8) * 255)
+     # inpaint_mask_img.save("mask_inpaint_before.png")
+
+     # keep only the largest connected region of the mask
+     dst = refine_mask(dst)
+     inpaint_mask = dst / 255 * 1
+
+     inpaint_mask_img = Image.fromarray(inpaint_mask.astype(np.uint8) * 255)
+     inpaint_mask_img.save("mask_inpaint.png")
+
+     # keep hands and feet untouched
+     inpaint_mask = np.logical_and(inpaint_mask, np.logical_not(dense_fixed))
+
+     mask = Image.fromarray(inpaint_mask.astype(np.uint8) * 255)
+     mask_gray = Image.fromarray(inpaint_mask.astype(np.uint8) * 127)
+
+     return mask, mask_gray, inpaint_mask
+
+ def merge_mask_image(image, mask):
+     mask = mask.convert("L")
+     white_image = Image.new("RGB", image.size, "white")
+     inverted_mask = Image.eval(mask, lambda x: 255 - x)
+     combined_image = Image.composite(image, white_image, inverted_mask)
+
+     return combined_image
+
+ # get the body bounding box from DensePose and black out everything outside it
+ def get_bbox_from_densepose(image, densepose_array, padding=0):
+     image = np.asarray(image)  # accept either a PIL image or an ndarray
+     body_pixels = np.column_stack(np.where(densepose_array > 0))
+
+     if body_pixels.size == 0:
+         return None  # no body pixels found
+
+     min_y, min_x = body_pixels.min(axis=0)
+     max_y, max_x = body_pixels.max(axis=0)
+
+     min_x = max(0, min_x - padding)
+     min_y = max(0, min_y - padding)
+     max_x = min(densepose_array.shape[1], max_x + padding)
+     max_y = min(densepose_array.shape[0], max_y + padding)
+
+     bbox = (min_x, min_y, max_x, max_y)
+
+     mask = np.zeros_like(image)
+     min_x, min_y, max_x, max_y = bbox
+     mask[min_y:max_y, min_x:max_x, :] = 255
+     masked_image = np.where(mask == 255, image, 0)
+     masked_image = Image.fromarray(masked_image)
+
+     return masked_image
+
+ # testing
+ from preprocess.openpose.run_openpose import OpenPose
+ from preprocess.humanparsing.run_parsing import Parsing
+
+ # from humanparsing.run_parsing import Parsing
+
+ if __name__ == '__main__':
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     openpose_model = OpenPose(0)
+     openpose_model.preprocessor.body_estimation.model.to(device)
+
+     model_image = Image.open('../model1.jpg').copy()
+     model_image = model_image.resize((768, 1024))
+
+     human_img_arg = _apply_exif_orientation(model_image.resize((384, 512)))
+     human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
+
+     args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
+     dense_pose = args.func(args, human_img_arg)
+
+     Image.fromarray(dense_pose[0][:, :, ::-1]).resize((768, 1024)).save("densepose.png")
+
+     dense_pose = dense_pose[1]
+
+     bbox_image = get_bbox_from_densepose(model_image.resize((384, 512)), dense_pose, 15)
+     bbox_image.save("zzz.png")
+
+     # get keypoints
+     keypoints = openpose_model(bbox_image)
+
+     parsing_model = Parsing(0)
+     model_parse, _ = parsing_model(model_image.resize((384, 512)))
+     model_parse.save("model_parse.png")
+
+     cate = ['upper_clothes', 'lower_body_pants', 'lower_body_skirts', 'dresses', 'full_body', 'lower_body_shoes', 'lower_body_boots']
+     # cate = ['lower_body_pants']
+     for category in cate:
+         mask, mask_gray, mask_arr = get_mask_location_new(category, model_parse, keypoints, width=384, height=512, dense_pose=dense_pose)
+         mask.resize((768, 1024)).save(f"mask_{category}.png")
+
+         model_image = model_image.resize((384, 512))
+         # print("kkkkkkkkk")
+         # mask = Image.open("mask_fixed.png")
+         model_image_end = merge_mask_image(model_image, mask)
+         model_image_end.save(f"model_image_{category}.png")