# Ultralytics YOLO 🚀, AGPL-3.0 license

import contextlib
import math
import re
import time

import cv2
import numpy as np
import torch
import torch.nn.functional as F
import torchvision

from ultralytics.utils import LOGGER
from ultralytics.utils.metrics import batch_probiou


class Profile(contextlib.ContextDecorator):
    """
    YOLOv8 Profile class. Use as a decorator with @Profile() or as a context manager with 'with Profile():'.

    Example:
        ```python
        from ultralytics.utils.ops import Profile
        with Profile(device=device) as dt:
            pass  # slow operation here
        print(dt)  # prints "Elapsed time is 9.5367431640625e-07 s"
        ```
    """

    def __init__(self, t=0.0, device: torch.device = None):
        """
        Initialize the Profile class.

        Args:
            t (float): Initial time. Defaults to 0.0.
            device (torch.device): Device used for model inference. Defaults to None (cpu).
        """
        self.t = t
        self.device = device
        self.cuda = bool(device and str(device).startswith("cuda"))

    def __enter__(self):
        """Start timing."""
        self.start = self.time()
        return self

    def __exit__(self, type, value, traceback):  # noqa
        """Stop timing."""
        self.dt = self.time() - self.start  # delta-time
        self.t += self.dt  # accumulate dt

    def __str__(self):
        """Returns a human-readable string representing the accumulated elapsed time in the profiler."""
        return f"Elapsed time is {self.t} s"

    def time(self):
        """Get current time."""
        if self.cuda:
            torch.cuda.synchronize(self.device)
        return time.time()


def segment2box(segment, width=640, height=640):
    """
    Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy).

    Args:
        segment (torch.Tensor): the segment label
        width (int): the width of the image. Defaults to 640
        height (int): The height of the image. Defaults to 640

    Returns:
        (np.ndarray): the minimum and maximum x and y values of the segment.
    """
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x = x[inside]
    y = y[inside]
    return (
        np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype)
        if any(x)
        else np.zeros(4, dtype=segment.dtype)
    )  # xyxy


def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False):
    """
    Rescales bounding boxes (in the format of xyxy by default) from the shape of the image they were originally
    specified in (img1_shape) to the shape of a different image (img0_shape).

    Args:
        img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width).
        boxes (torch.Tensor): the bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2)
        img0_shape (tuple): the shape of the target image, in the format of (height, width).
        ratio_pad (tuple): a tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be
            calculated based on the size difference between the two images.
        padding (bool): If True, assume the boxes are based on an image augmented by YOLO-style letterboxing. If
            False, do regular rescaling.
        xywh (bool): Whether the box format is xywh. Defaults to False.

    Returns:
        boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2)
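
    Example:
        A minimal usage sketch with illustrative values: a 640x640 letterboxed inference image mapped back to a
        640x480 (w x h) original, where 80 px of vertical padding is removed.

        ```python
        import torch
        from ultralytics.utils.ops import scale_boxes

        boxes = torch.tensor([[100.0, 180.0, 300.0, 400.0]])  # xyxy in inference-image coordinates
        scale_boxes((640, 640), boxes.clone(), (480, 640))  # tensor([[100., 100., 300., 320.]])
        ```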
""" | |
if ratio_pad is None: # calculate from img0_shape | |
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new | |
pad = ( | |
round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1), | |
round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1), | |
) # wh padding | |
else: | |
gain = ratio_pad[0][0] | |
pad = ratio_pad[1] | |
if padding: | |
boxes[..., 0] -= pad[0] # x padding | |
boxes[..., 1] -= pad[1] # y padding | |
if not xywh: | |
boxes[..., 2] -= pad[0] # x padding | |
boxes[..., 3] -= pad[1] # y padding | |
boxes[..., :4] /= gain | |
return clip_boxes(boxes, img0_shape) | |


def make_divisible(x, divisor):
    """
    Returns the smallest number that is greater than or equal to x and divisible by the given divisor.

    Args:
        x (int): The number to make divisible.
        divisor (int | torch.Tensor): The divisor.

    Returns:
        (int): The smallest number greater than or equal to x that is divisible by the divisor.
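
    Example:
        ```python
        from ultralytics.utils.ops import make_divisible

        make_divisible(300, 32)  # 320, the next multiple of 32 at or above 300
        make_divisible(320, 32)  # 320, already divisible
        ```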
""" | |
if isinstance(divisor, torch.Tensor): | |
divisor = int(divisor.max()) # to int | |
return math.ceil(x / divisor) * divisor | |


def nms_rotated(boxes, scores, threshold=0.45):
    """
    NMS for oriented bounding boxes (OBB), powered by probiou and fast-nms.

    Args:
        boxes (torch.Tensor): (N, 5), xywhr.
        scores (torch.Tensor): (N, ).
        threshold (float): IoU threshold.

    Returns:
        (torch.Tensor): Indices of the boxes to keep after NMS, ordered by decreasing score.
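
    Example:
        An illustrative sketch: two heavily overlapping oriented boxes and one distant box; only the
        higher-scoring duplicate and the distant box survive.

        ```python
        import torch
        from ultralytics.utils.ops import nms_rotated

        boxes = torch.tensor(
            [[50.0, 50.0, 20.0, 10.0, 0.0], [51.0, 50.0, 20.0, 10.0, 0.05], [150.0, 150.0, 30.0, 15.0, 0.3]]
        )  # (cx, cy, w, h, r)
        scores = torch.tensor([0.9, 0.8, 0.7])
        nms_rotated(boxes, scores, threshold=0.45)  # e.g. tensor([0, 2])
        ```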
""" | |
if len(boxes) == 0: | |
return np.empty((0,), dtype=np.int8) | |
sorted_idx = torch.argsort(scores, descending=True) | |
boxes = boxes[sorted_idx] | |
ious = batch_probiou(boxes, boxes).triu_(diagonal=1) | |
pick = torch.nonzero(ious.max(dim=0)[0] < threshold).squeeze_(-1) | |
return sorted_idx[pick] | |


def non_max_suppression(
    prediction,
    conf_thres=0.25,
    iou_thres=0.45,
    classes=None,
    agnostic=False,
    multi_label=False,
    labels=(),
    max_det=300,
    nc=0,  # number of classes (optional)
    max_time_img=0.05,
    max_nms=30000,
    max_wh=7680,
    in_place=True,
    rotated=False,
):
    """
    Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box.

    Args:
        prediction (torch.Tensor): A tensor of shape (batch_size, num_classes + 4 + num_masks, num_boxes)
            containing the predicted boxes, classes, and masks. The tensor should be in the format
            output by a model, such as YOLO.
        conf_thres (float): The confidence threshold below which boxes will be filtered out.
            Valid values are between 0.0 and 1.0.
        iou_thres (float): The IoU threshold below which boxes will be filtered out during NMS.
            Valid values are between 0.0 and 1.0.
        classes (List[int]): A list of class indices to consider. If None, all classes will be considered.
        agnostic (bool): If True, perform class-agnostic NMS, i.e. all classes are treated as one.
        multi_label (bool): If True, each box may have multiple labels.
        labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner
            list contains the apriori labels for a given image. The list should be in the format
            output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2).
        max_det (int): The maximum number of boxes to keep after NMS.
        nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks.
        max_time_img (float): The maximum time (seconds) for processing one image.
        max_nms (int): The maximum number of boxes passed into torchvision.ops.nms().
        max_wh (int): The maximum box width and height in pixels.
        in_place (bool): If True, the input prediction tensor will be modified in place.
        rotated (bool): If True, treat predictions as oriented bounding boxes (OBB) and use rotated NMS.

    Returns:
        (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of
            shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns
            (x1, y1, x2, y2, confidence, class, mask1, mask2, ...).
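
    Example:
        A minimal sketch with random values; real inputs come from a YOLO model's raw output.

        ```python
        import torch
        from ultralytics.utils.ops import non_max_suppression

        prediction = torch.rand(1, 84, 6300)  # batch of 1, 4 box coords + 80 classes, 6300 anchors
        results = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45)
        print(results[0].shape)  # (num_kept_boxes, 6) -> x1, y1, x2, y2, confidence, class
        ```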
""" | |
# Checks | |
assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0" | |
assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0" | |
if isinstance(prediction, (list, tuple)): # YOLOv8 model in validation model, output = (inference_out, loss_out) | |
prediction = prediction[0] # select only inference output | |
bs = prediction.shape[0] # batch size | |
nc = nc or (prediction.shape[1] - 4) # number of classes | |
nm = prediction.shape[1] - nc - 4 | |
mi = 4 + nc # mask start index | |
xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates | |
# Settings | |
# min_wh = 2 # (pixels) minimum box width and height | |
time_limit = 2.0 + max_time_img * bs # seconds to quit after | |
multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) | |
prediction = prediction.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84) | |
if not rotated: | |
if in_place: | |
prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # xywh to xyxy | |
else: | |
prediction = torch.cat((xywh2xyxy(prediction[..., :4]), prediction[..., 4:]), dim=-1) # xywh to xyxy | |
t = time.time() | |
output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs | |
for xi, x in enumerate(prediction): # image index, image inference | |
# Apply constraints | |
# x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height | |
x = x[xc[xi]] # confidence | |
# Cat apriori labels if autolabelling | |
if labels and len(labels[xi]) and not rotated: | |
lb = labels[xi] | |
v = torch.zeros((len(lb), nc + nm + 4), device=x.device) | |
v[:, :4] = xywh2xyxy(lb[:, 1:5]) # box | |
v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls | |
x = torch.cat((x, v), 0) | |
# If none remain process next image | |
if not x.shape[0]: | |
continue | |
# Detections matrix nx6 (xyxy, conf, cls) | |
box, cls, mask = x.split((4, nc, nm), 1) | |
if multi_label: | |
i, j = torch.where(cls > conf_thres) | |
x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) | |
else: # best class only | |
conf, j = cls.max(1, keepdim=True) | |
x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] | |
# Filter by class | |
if classes is not None: | |
x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] | |
# Check shape | |
n = x.shape[0] # number of boxes | |
if not n: # no boxes | |
continue | |
if n > max_nms: # excess boxes | |
x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes | |
# Batched NMS | |
c = x[:, 5:6] * (0 if agnostic else max_wh) # classes | |
scores = x[:, 4] # scores | |
if rotated: | |
boxes = torch.cat((x[:, :2] + c, x[:, 2:4], x[:, -1:]), dim=-1) # xywhr | |
i = nms_rotated(boxes, scores, iou_thres) | |
else: | |
boxes = x[:, :4] + c # boxes (offset by class) | |
i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS | |
i = i[:max_det] # limit detections | |
# # Experimental | |
# merge = False # use merge-NMS | |
# if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) | |
# # Update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) | |
# from .metrics import box_iou | |
# iou = box_iou(boxes[i], boxes) > iou_thres # IoU matrix | |
# weights = iou * scores[None] # box weights | |
# x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes | |
# redundant = True # require redundant detections | |
# if redundant: | |
# i = i[iou.sum(1) > 1] # require redundancy | |
output[xi] = x[i] | |
if (time.time() - t) > time_limit: | |
LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded") | |
break # time limit exceeded | |
return output | |


def clip_boxes(boxes, shape):
    """
    Clips bounding boxes to the image shape (height, width).

    Args:
        boxes (torch.Tensor | numpy.ndarray): the bounding boxes to clip
        shape (tuple): the shape of the image

    Returns:
        (torch.Tensor | numpy.ndarray): Clipped boxes
    """
    if isinstance(boxes, torch.Tensor):  # faster individually (WARNING: inplace .clamp_() Apple MPS bug)
        boxes[..., 0] = boxes[..., 0].clamp(0, shape[1])  # x1
        boxes[..., 1] = boxes[..., 1].clamp(0, shape[0])  # y1
        boxes[..., 2] = boxes[..., 2].clamp(0, shape[1])  # x2
        boxes[..., 3] = boxes[..., 3].clamp(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def clip_coords(coords, shape):
    """
    Clip line coordinates to the image boundaries.

    Args:
        coords (torch.Tensor | numpy.ndarray): A list of line coordinates.
        shape (tuple): A tuple of integers representing the size of the image in the format (height, width).

    Returns:
        (torch.Tensor | numpy.ndarray): Clipped coordinates
    """
    if isinstance(coords, torch.Tensor):  # faster individually (WARNING: inplace .clamp_() Apple MPS bug)
        coords[..., 0] = coords[..., 0].clamp(0, shape[1])  # x
        coords[..., 1] = coords[..., 1].clamp(0, shape[0])  # y
    else:  # np.array (faster grouped)
        coords[..., 0] = coords[..., 0].clip(0, shape[1])  # x
        coords[..., 1] = coords[..., 1].clip(0, shape[0])  # y
    return coords


def scale_image(masks, im0_shape, ratio_pad=None):
    """
    Takes a mask, and resizes it to the original image size.

    Args:
        masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3].
        im0_shape (tuple): the original image shape
        ratio_pad (tuple): the ratio of the padding to the original image.

    Returns:
        masks (np.ndarray): The masks cropped and rescaled to the original image shape.
    """
    # Rescale coordinates (xyxy) from im1_shape to im0_shape
    im1_shape = masks.shape
    if im1_shape[:2] == im0_shape[:2]:
        return masks
    if ratio_pad is None:  # calculate from im0_shape
        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
    else:
        # gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    top, left = int(pad[1]), int(pad[0])  # y, x
    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])

    if len(masks.shape) < 2:
        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
    masks = masks[top:bottom, left:right]
    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
    if len(masks.shape) == 2:
        masks = masks[:, :, None]

    return masks


def xyxy2xywh(x):
    """
    Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format where (x1, y1) is the
    top-left corner and (x2, y2) is the bottom-right corner.

    Args:
        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format.
    """
    assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
    y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
    y[..., 2] = x[..., 2] - x[..., 0]  # width
    y[..., 3] = x[..., 3] - x[..., 1]  # height
    return y


def xywh2xyxy(x):
    """
    Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the
    top-left corner and (x2, y2) is the bottom-right corner.

    Args:
        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format.

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format.
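
    Example:
        ```python
        import torch
        from ultralytics.utils.ops import xywh2xyxy

        boxes = torch.tensor([[50.0, 50.0, 20.0, 40.0]])  # (x_center, y_center, width, height)
        xywh2xyxy(boxes)  # tensor([[40., 30., 60., 70.]])
        ```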
""" | |
assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" | |
y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy | |
dw = x[..., 2] / 2 # half-width | |
dh = x[..., 3] / 2 # half-height | |
y[..., 0] = x[..., 0] - dw # top left x | |
y[..., 1] = x[..., 1] - dh # top left y | |
y[..., 2] = x[..., 0] + dw # bottom right x | |
y[..., 3] = x[..., 1] + dh # bottom right y | |
return y | |


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    """
    Convert normalized bounding box coordinates to pixel coordinates.

    Args:
        x (np.ndarray | torch.Tensor): The bounding box coordinates.
        w (int): Width of the image. Defaults to 640
        h (int): Height of the image. Defaults to 640
        padw (int): Padding width. Defaults to 0
        padh (int): Padding height. Defaults to 0

    Returns:
        y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where
            x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box.
    """
    assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
    y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom right y
    return y


def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    """
    Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. x, y,
    width and height are normalized to image dimensions.

    Args:
        x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
        w (int): The width of the image. Defaults to 640
        h (int): The height of the image. Defaults to 640
        clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False
        eps (float): The minimum value of the box's width and height. Defaults to 0.0

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format
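
    Example:
        ```python
        import torch
        from ultralytics.utils.ops import xyxy2xywhn

        boxes = torch.tensor([[40.0, 30.0, 60.0, 70.0]])  # xyxy in pixels
        xyxy2xywhn(boxes, w=640, h=640)  # values are 50/640, 50/640, 20/640, 40/640
        ```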
""" | |
if clip: | |
x = clip_boxes(x, (h - eps, w - eps)) | |
assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}" | |
y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy | |
y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center | |
y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center | |
y[..., 2] = (x[..., 2] - x[..., 0]) / w # width | |
y[..., 3] = (x[..., 3] - x[..., 1]) / h # height | |
return y | |


def xywh2ltwh(x):
    """
    Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates.

    Args:
        x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = x[..., 0] - x[..., 2] / 2  # top left x
    y[..., 1] = x[..., 1] - x[..., 3] / 2  # top left y
    return y


def xyxy2ltwh(x):
    """
    Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right.

    Args:
        x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 2] = x[..., 2] - x[..., 0]  # width
    y[..., 3] = x[..., 3] - x[..., 1]  # height
    return y


def ltwh2xywh(x):
    """
    Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center.

    Args:
        x (torch.Tensor): the input tensor

    Returns:
        y (np.ndarray | torch.Tensor): The bounding box coordinates in the xywh format.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = x[..., 0] + x[..., 2] / 2  # center x
    y[..., 1] = x[..., 1] + x[..., 3] / 2  # center y
    return y


def xyxyxyxy2xywhr(corners):
    """
    Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are
    returned in radians from 0 to pi/2.

    Args:
        corners (numpy.ndarray | torch.Tensor): Input corners of shape (n, 8).

    Returns:
        (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5).
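
    Example:
        An illustrative sketch with one axis-aligned rectangle; the exact width/height order and angle follow the
        cv2.minAreaRect convention of the installed OpenCV version.

        ```python
        import numpy as np
        from ultralytics.utils.ops import xyxyxyxy2xywhr

        corners = np.array([[10.0, 10.0, 30.0, 10.0, 30.0, 20.0, 10.0, 20.0]], dtype=np.float32)
        xyxyxyxy2xywhr(corners)  # approximately [[20., 15., w, h, rotation_in_radians]]
        ```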
""" | |
is_torch = isinstance(corners, torch.Tensor) | |
points = corners.cpu().numpy() if is_torch else corners | |
points = points.reshape(len(corners), -1, 2) | |
rboxes = [] | |
for pts in points: | |
# NOTE: Use cv2.minAreaRect to get accurate xywhr, | |
# especially some objects are cut off by augmentations in dataloader. | |
(x, y), (w, h), angle = cv2.minAreaRect(pts) | |
rboxes.append([x, y, w, h, angle / 180 * np.pi]) | |
return ( | |
torch.tensor(rboxes, device=corners.device, dtype=corners.dtype) | |
if is_torch | |
else np.asarray(rboxes, dtype=points.dtype) | |
) # rboxes | |


def xywhr2xyxyxyxy(rboxes):
    """
    Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values
    should be in radians from 0 to pi/2.

    Args:
        rboxes (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).

    Returns:
        (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
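
    Example:
        ```python
        import numpy as np
        from ultralytics.utils.ops import xywhr2xyxyxyxy

        rbox = np.array([[20.0, 15.0, 20.0, 10.0, 0.0]])  # cx, cy, w, h, rotation (radians)
        xywhr2xyxyxyxy(rbox)  # corners of shape (1, 4, 2): [[30, 20], [30, 10], [10, 10], [10, 20]]
        ```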
""" | |
is_numpy = isinstance(rboxes, np.ndarray) | |
cos, sin = (np.cos, np.sin) if is_numpy else (torch.cos, torch.sin) | |
ctr = rboxes[..., :2] | |
w, h, angle = (rboxes[..., i : i + 1] for i in range(2, 5)) | |
cos_value, sin_value = cos(angle), sin(angle) | |
vec1 = [w / 2 * cos_value, w / 2 * sin_value] | |
vec2 = [-h / 2 * sin_value, h / 2 * cos_value] | |
vec1 = np.concatenate(vec1, axis=-1) if is_numpy else torch.cat(vec1, dim=-1) | |
vec2 = np.concatenate(vec2, axis=-1) if is_numpy else torch.cat(vec2, dim=-1) | |
pt1 = ctr + vec1 + vec2 | |
pt2 = ctr + vec1 - vec2 | |
pt3 = ctr - vec1 - vec2 | |
pt4 = ctr - vec1 + vec2 | |
return np.stack([pt1, pt2, pt3, pt4], axis=-2) if is_numpy else torch.stack([pt1, pt2, pt3, pt4], dim=-2) | |


def ltwh2xyxy(x):
    """
    It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.

    Args:
        x (np.ndarray | torch.Tensor): the input boxes in ltwh format

    Returns:
        y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes.
    """
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 2] = x[..., 2] + x[..., 0]  # bottom right x
    y[..., 3] = x[..., 3] + x[..., 1]  # bottom right y
    return y


def segments2boxes(segments):
    """
    It converts segment labels to box labels, i.e. (xy1, xy2, ...) to (xywh).

    Args:
        segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates

    Returns:
        (np.ndarray): the xywh coordinates of the bounding boxes.
    """
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # xyxy
    return xyxy2xywh(np.array(boxes))  # xywh


def resample_segments(segments, n=1000):
    """
    Inputs a list of segments (each an (m, 2) array of points) and returns a list of segments up-sampled to n points
    each.

    Args:
        segments (list): a list of (m, 2) arrays, where m is the number of points in the segment.
        n (int): number of points to resample each segment to. Defaults to 1000

    Returns:
        segments (list): the resampled segments.
    """
    for i, s in enumerate(segments):
        s = np.concatenate((s, s[0:1, :]), axis=0)
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = (
            np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], dtype=np.float32).reshape(2, -1).T
        )  # segment xy
    return segments


def crop_mask(masks, boxes):
    """
    It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box.

    Args:
        masks (torch.Tensor): [n, h, w] tensor of masks
        boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form

    Returns:
        (torch.Tensor): The masks cropped to their bounding boxes (zeroed outside the box).
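
    Example:
        A minimal sketch: an all-ones 8x8 mask cropped to a 4x4 box keeps exactly 16 foreground pixels.

        ```python
        import torch
        from ultralytics.utils.ops import crop_mask

        masks = torch.ones(1, 8, 8)
        boxes = torch.tensor([[2.0, 2.0, 6.0, 6.0]])  # x1, y1, x2, y2 in mask pixels
        int(crop_mask(masks, boxes).sum())  # 16
        ```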
""" | |
_, h, w = masks.shape | |
x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1) | |
r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w) | |
c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1) | |
return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) | |


def process_mask_upsample(protos, masks_in, bboxes, shape):
    """
    Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher
    quality but is slower.

    Args:
        protos (torch.Tensor): [mask_dim, mask_h, mask_w]
        masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms
        bboxes (torch.Tensor): [n, 4], n is number of masks after nms
        shape (tuple): the size of the input image (h,w)

    Returns:
        (torch.Tensor): The upsampled masks.
    """
    c, mh, mw = protos.shape  # CHW
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
    masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
    masks = crop_mask(masks, bboxes)  # CHW
    return masks.gt_(0.5)


def process_mask(protos, masks_in, bboxes, shape, upsample=False):
    """
    Apply masks to bounding boxes using the output of the mask head.

    Args:
        protos (torch.Tensor): A tensor of shape [mask_dim, mask_h, mask_w].
        masks_in (torch.Tensor): A tensor of shape [n, mask_dim], where n is the number of masks after NMS.
        bboxes (torch.Tensor): A tensor of shape [n, 4], where n is the number of masks after NMS.
        shape (tuple): A tuple of integers representing the size of the input image in the format (h, w).
        upsample (bool): A flag to indicate whether to upsample the mask to the original image size. Default is False.

    Returns:
        (torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w
            are the height and width of the input image. The mask is applied to the bounding boxes.
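
    Example:
        An illustrative sketch with random prototype masks; shapes mirror a typical 640x640 segmentation setup
        with 32 prototypes of 160x160.

        ```python
        import torch
        from ultralytics.utils.ops import process_mask

        protos = torch.rand(32, 160, 160)
        masks_in = torch.rand(2, 32)  # 2 detections kept after NMS
        bboxes = torch.tensor([[0.0, 0.0, 320.0, 320.0], [320.0, 320.0, 640.0, 640.0]])
        masks = process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True)
        print(masks.shape)  # torch.Size([2, 640, 640])
        ```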
""" | |
c, mh, mw = protos.shape # CHW | |
ih, iw = shape | |
masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW | |
width_ratio = mw / iw | |
height_ratio = mh / ih | |
downsampled_bboxes = bboxes.clone() | |
downsampled_bboxes[:, 0] *= width_ratio | |
downsampled_bboxes[:, 2] *= width_ratio | |
downsampled_bboxes[:, 3] *= height_ratio | |
downsampled_bboxes[:, 1] *= height_ratio | |
masks = crop_mask(masks, downsampled_bboxes) # CHW | |
if upsample: | |
masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW | |
return masks.gt_(0.5) | |


def process_mask_native(protos, masks_in, bboxes, shape):
    """
    It takes the output of the mask head, and crops it after upsampling to the bounding boxes.

    Args:
        protos (torch.Tensor): [mask_dim, mask_h, mask_w]
        masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms
        bboxes (torch.Tensor): [n, 4], n is number of masks after nms
        shape (tuple): the size of the input image (h,w)

    Returns:
        masks (torch.Tensor): The returned masks with dimensions [n, h, w]
    """
    c, mh, mw = protos.shape  # CHW
    masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)
    masks = scale_masks(masks[None], shape)[0]  # CHW
    masks = crop_mask(masks, bboxes)  # CHW
    return masks.gt_(0.5)


def scale_masks(masks, shape, padding=True):
    """
    Rescale segment masks to shape.

    Args:
        masks (torch.Tensor): (N, C, H, W).
        shape (tuple): Height and width.
        padding (bool): If True, assume the masks are based on an image augmented by YOLO-style letterboxing. If
            False, do regular rescaling.

    Returns:
        (torch.Tensor): The rescaled masks of shape (N, C, *shape).
""" | |
mh, mw = masks.shape[2:] | |
gain = min(mh / shape[0], mw / shape[1]) # gain = old / new | |
pad = [mw - shape[1] * gain, mh - shape[0] * gain] # wh padding | |
if padding: | |
pad[0] /= 2 | |
pad[1] /= 2 | |
top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0) # y, x | |
bottom, right = (int(mh - pad[1]), int(mw - pad[0])) | |
masks = masks[..., top:bottom, left:right] | |
masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False) # NCHW | |
return masks | |


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True):
    """
    Rescale segment coordinates (xy) from img1_shape to img0_shape.

    Args:
        img1_shape (tuple): The shape of the image that the coords are from.
        coords (torch.Tensor): the coords to be scaled of shape n,2.
        img0_shape (tuple): the shape of the image that the segmentation is being applied to.
        ratio_pad (tuple): the ratio of the image size to the padded image size.
        normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False.
        padding (bool): If True, assume the coords are based on an image augmented by YOLO-style letterboxing. If
            False, do regular rescaling.

    Returns:
        coords (torch.Tensor): The scaled coordinates.
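
    Example:
        A minimal sketch with illustrative values: a keypoint predicted on a 640x640 letterboxed image mapped
        back to a 640x480 (w x h) original.

        ```python
        import torch
        from ultralytics.utils.ops import scale_coords

        coords = torch.tensor([[320.0, 400.0]])
        scale_coords((640, 640), coords.clone(), (480, 640))  # tensor([[320., 320.]])
        ```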
""" | |
if ratio_pad is None: # calculate from img0_shape | |
gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new | |
pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding | |
else: | |
gain = ratio_pad[0][0] | |
pad = ratio_pad[1] | |
if padding: | |
coords[..., 0] -= pad[0] # x padding | |
coords[..., 1] -= pad[1] # y padding | |
coords[..., 0] /= gain | |
coords[..., 1] /= gain | |
coords = clip_coords(coords, img0_shape) | |
if normalize: | |
coords[..., 0] /= img0_shape[1] # width | |
coords[..., 1] /= img0_shape[0] # height | |
return coords | |


def regularize_rboxes(rboxes):
    """
    Regularize rotated boxes in range [0, pi/2].

    Args:
        rboxes (torch.Tensor): (N, 5), xywhr.

    Returns:
        (torch.Tensor): The regularized boxes.
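
    Example:
        A minimal sketch: a box whose height exceeds its width has its edges swapped and its angle shifted
        by pi/2.

        ```python
        import torch
        from ultralytics.utils.ops import regularize_rboxes

        rboxes = torch.tensor([[50.0, 50.0, 10.0, 20.0, 0.2]])
        regularize_rboxes(rboxes)  # tensor([[50.0, 50.0, 20.0, 10.0, 1.7708]]) where 1.7708 = 0.2 + pi/2
        ```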
""" | |
x, y, w, h, t = rboxes.unbind(dim=-1) | |
# Swap edge and angle if h >= w | |
w_ = torch.where(w > h, w, h) | |
h_ = torch.where(w > h, h, w) | |
t = torch.where(w > h, t, t + math.pi / 2) % math.pi | |
return torch.stack([x, y, w_, h_, t], dim=-1) # regularized boxes | |


def masks2segments(masks, strategy="largest"):
    """
    It takes a list of masks(n,h,w) and returns a list of segments(n,xy).

    Args:
        masks (torch.Tensor): the output of the model, a tensor of shape (n, h, w)
        strategy (str): 'concat' or 'largest'. Defaults to largest

    Returns:
        segments (List): list of segment masks
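
    Example:
        A minimal sketch: a single square blob is traced into a polygon of (x, y) contour points.

        ```python
        import torch
        from ultralytics.utils.ops import masks2segments

        masks = torch.zeros(1, 160, 160)
        masks[0, 40:80, 40:80] = 1.0
        segments = masks2segments(masks, strategy="largest")
        print(segments[0].shape)  # (num_contour_points, 2)
        ```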
""" | |
segments = [] | |
for x in masks.int().cpu().numpy().astype("uint8"): | |
c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] | |
if c: | |
if strategy == "concat": # concatenate all segments | |
c = np.concatenate([x.reshape(-1, 2) for x in c]) | |
elif strategy == "largest": # select largest segment | |
c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) | |
else: | |
c = np.zeros((0, 2)) # no segments found | |
segments.append(c.astype("float32")) | |
return segments | |


def convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray:
    """
    Convert a batch of FP32 torch tensors (0.0-1.0) to a NumPy uint8 array (0-255), changing from BCHW to BHWC layout.

    Args:
        batch (torch.Tensor): Input tensor batch of shape (Batch, Channels, Height, Width) and dtype torch.float32.

    Returns:
        (np.ndarray): Output NumPy array batch of shape (Batch, Height, Width, Channels) and dtype uint8.
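
    Example:
        ```python
        import torch
        from ultralytics.utils.ops import convert_torch2numpy_batch

        batch = torch.rand(4, 3, 640, 640)  # BCHW float images in [0, 1]
        array = convert_torch2numpy_batch(batch)
        print(array.shape, array.dtype)  # (4, 640, 640, 3) uint8
        ```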
""" | |
return (batch.permute(0, 2, 3, 1).contiguous() * 255).clamp(0, 255).to(torch.uint8).cpu().numpy() | |


def clean_str(s):
    """
    Cleans a string by replacing special characters with underscore _

    Args:
        s (str): a string needing special characters replaced

    Returns:
        (str): a string with special characters replaced by an underscore _
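
    Example:
        ```python
        from ultralytics.utils.ops import clean_str

        clean_str("my video (1).mp4")  # 'my video _1_.mp4'
        ```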
""" | |
return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) | |


def v10postprocess(preds, max_det, nc=80):
    """
    Post-process YOLOv10 predictions by selecting the top-scoring detections without NMS.

    Args:
        preds (torch.Tensor): Predictions of shape (batch_size, num_anchors, 4 + nc).
        max_det (int): The maximum number of detections to keep per image.
        nc (int): The number of classes. Defaults to 80.

    Returns:
        boxes, scores, labels (torch.Tensor): Top-k boxes (batch, max_det, 4), with scores and class labels of
            shape (batch, max_det).
    """
    assert 4 + nc == preds.shape[-1]
    boxes, scores = preds.split([4, nc], dim=-1)
    max_scores = scores.amax(dim=-1)
    max_scores, index = torch.topk(max_scores, max_det, dim=-1)
    index = index.unsqueeze(-1)
    boxes = torch.gather(boxes, dim=1, index=index.repeat(1, 1, boxes.shape[-1]))
    scores = torch.gather(scores, dim=1, index=index.repeat(1, 1, scores.shape[-1]))
    scores, index = torch.topk(scores.flatten(1), max_det, dim=-1)
    labels = index % nc
    index = index // nc
    boxes = boxes.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, boxes.shape[-1]))
    return boxes, scores, labels