- # Ultralytics YOLO 🚀, AGPL-3.0 license
- import contextlib
- import math
- import re
- import time
- import cv2
- import numpy as np
- import torch
- import torch.nn.functional as F
- from ultralytics.utils import LOGGER
- from ultralytics.utils.metrics import batch_probiou
- class Profile(contextlib.ContextDecorator):
- """
- YOLOv8 Profile class. Use as a decorator with @Profile() or as a context manager with 'with Profile():'.
- Example:
- ```python
- from ultralytics.utils.ops import Profile
- with Profile(device=device) as dt:
- pass # slow operation here
- print(dt) # prints "Elapsed time is 9.5367431640625e-07 s"
- ```
- """
- def __init__(self, t=0.0, device: torch.device = None):
- """
- Initialize the Profile class.
- Args:
- t (float): Initial time. Defaults to 0.0.
- device (torch.device): Device used for model inference. Defaults to None (CPU).
- """
- self.t = t
- self.device = device
- self.cuda = bool(device and str(device).startswith("cuda"))
- def __enter__(self):
- """Start timing."""
- self.start = self.time()
- return self
- def __exit__(self, type, value, traceback): # noqa
- """Stop timing."""
- self.dt = self.time() - self.start # delta-time
- self.t += self.dt # accumulate dt
- def __str__(self):
- """Returns a human-readable string representing the accumulated elapsed time in the profiler."""
- return f"Elapsed time is {self.t} s"
- def time(self):
- """Get current time."""
- if self.cuda:
- torch.cuda.synchronize(self.device)
- return time.time()
- def segment2box(segment, width=640, height=640):
- """
- Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy).
- Args:
- segment (torch.Tensor): the segment label
- width (int): the width of the image. Defaults to 640
- height (int): The height of the image. Defaults to 640
- Returns:
- (np.ndarray): the minimum and maximum x and y values of the segment.
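- Example:
- A minimal usage sketch with an illustrative NumPy segment (values are made up):
- ```python
- import numpy as np
- seg = np.array([[10.0, 20.0], [30.0, 5.0], [25.0, 40.0]])
- segment2box(seg, width=640, height=640)  # array([10., 5., 30., 40.])
- ```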
- """
- x, y = segment.T # segment xy
- inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
- x = x[inside]
- y = y[inside]
- return (
- np.array([x.min(), y.min(), x.max(), y.max()], dtype=segment.dtype)
- if any(x)
- else np.zeros(4, dtype=segment.dtype)
- ) # xyxy
- def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None, padding=True, xywh=False):
- """
- Rescales bounding boxes (in the format of xyxy by default) from the shape of the image they were originally
- specified in (img1_shape) to the shape of a different image (img0_shape).
- Args:
- img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width).
- boxes (torch.Tensor): the bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2)
- img0_shape (tuple): the shape of the target image, in the format of (height, width).
- ratio_pad (tuple): a tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be
- calculated based on the size difference between the two images.
- padding (bool): If True, assume the boxes are based on an image letterboxed by YOLO-style augmentation. If False,
- do regular rescaling.
- xywh (bool): Whether the box format is xywh. Defaults to False.
- Returns:
- boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2)
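- Example:
- A minimal sketch mapping a box from a 640x640 letterboxed input back to a 480x640 original image (values are illustrative):
- ```python
- import torch
- boxes = torch.tensor([[100.0, 150.0, 200.0, 250.0]])  # xyxy in the 640x640 letterboxed image
- scale_boxes((640, 640), boxes, (480, 640))  # tensor([[100., 70., 200., 170.]])
- ```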
- """
- if ratio_pad is None: # calculate from img0_shape
- gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
- pad = (
- round((img1_shape[1] - img0_shape[1] * gain) / 2 - 0.1),
- round((img1_shape[0] - img0_shape[0] * gain) / 2 - 0.1),
- ) # wh padding
- else:
- gain = ratio_pad[0][0]
- pad = ratio_pad[1]
- if padding:
- boxes[..., 0] -= pad[0] # x padding
- boxes[..., 1] -= pad[1] # y padding
- if not xywh:
- boxes[..., 2] -= pad[0] # x padding
- boxes[..., 3] -= pad[1] # y padding
- boxes[..., :4] /= gain
- return clip_boxes(boxes, img0_shape)
- def make_divisible(x, divisor):
- """
- Returns the smallest number greater than or equal to x that is divisible by the given divisor.
- Args:
- x (int): The number to make divisible.
- divisor (int | torch.Tensor): The divisor.
- Returns:
- (int): The nearest number divisible by the divisor.
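- Example:
- Rounding a channel count up to the nearest multiple of 32:
- ```python
- make_divisible(97, 32)  # 128
- make_divisible(640, 32)  # 640
- ```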
- """
- if isinstance(divisor, torch.Tensor):
- divisor = int(divisor.max()) # to int
- return math.ceil(x / divisor) * divisor
- def nms_rotated(boxes, scores, threshold=0.45):
- """
- Non-maximum suppression (NMS) for oriented bounding boxes (OBB), powered by ProbIoU and Fast-NMS.
- Args:
- boxes (torch.Tensor): (N, 5), xywhr.
- scores (torch.Tensor): (N, ).
- threshold (float): IoU threshold.
- Returns:
- (torch.Tensor): Indices of the boxes to keep after NMS.
- """
- if len(boxes) == 0:
- return np.empty((0,), dtype=np.int8)
- sorted_idx = torch.argsort(scores, descending=True)
- boxes = boxes[sorted_idx]
- ious = batch_probiou(boxes, boxes).triu_(diagonal=1)
- pick = torch.nonzero(ious.max(dim=0)[0] < threshold).squeeze_(-1)
- return sorted_idx[pick]
- def bbox_iou_for_nms(box1, box2, xywh=False, GIoU=False, DIoU=False, CIoU=False, EIoU=False, SIoU=False, ShapeIoU=False, eps=1e-7, scale=0.0):
- """
- Calculate Intersection over Union (IoU) of box1(1, 4) to box2(n, 4).
- Args:
- box1 (torch.Tensor): A tensor representing a single bounding box with shape (1, 4).
- box2 (torch.Tensor): A tensor representing n bounding boxes with shape (n, 4).
- xywh (bool, optional): If True, input boxes are in (x, y, w, h) format. If False, input boxes are in
- (x1, y1, x2, y2) format. Defaults to False.
- GIoU (bool, optional): If True, calculate Generalized IoU. Defaults to False.
- DIoU (bool, optional): If True, calculate Distance IoU. Defaults to False.
- CIoU (bool, optional): If True, calculate Complete IoU. Defaults to False.
- EIoU (bool, optional): If True, calculate Efficient IoU. Defaults to False.
- SIoU (bool, optional): If True, calculate Scylla IoU. Defaults to False.
- ShapeIoU (bool, optional): If True, calculate Shape-IoU. Defaults to False.
- eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.
- scale (float, optional): Scale factor used by Shape-IoU. Defaults to 0.0.
- Returns:
- (torch.Tensor): IoU, GIoU, DIoU, CIoU, EIoU, SIoU, or Shape-IoU values depending on the specified flags.
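- Example:
- A minimal sketch comparing one xyxy box against two others (values are illustrative):
- ```python
- import torch
- box1 = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
- box2 = torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 15.0, 15.0]])
- bbox_iou_for_nms(box1, box2)  # plain IoU, shape (2, 1), approximately [[1.0], [0.14]]
- bbox_iou_for_nms(box1, box2, CIoU=True)  # Complete IoU values, same shape
- ```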
- """
- # Get the coordinates of bounding boxes
- if xywh: # transform from xywh to xyxy
- (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1)
- w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2
- b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_
- b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_
- else: # x1, y1, x2, y2 = box1
- b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1)
- b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1)
- w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
- w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
- # Intersection area
- inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp_(0) * \
- (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp_(0)
- # Union Area
- union = w1 * h1 + w2 * h2 - inter + eps
- # IoU
- iou = inter / union
- if CIoU or DIoU or GIoU or EIoU or SIoU or ShapeIoU:
- cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width
- ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height
- if CIoU or DIoU or EIoU or SIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
- c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
- rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2
- if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
- v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2)
- with torch.no_grad():
- alpha = v / (v - iou + (1 + eps))
- return iou - (rho2 / c2 + v * alpha) # CIoU
- elif EIoU:
- rho_w2 = ((b2_x2 - b2_x1) - (b1_x2 - b1_x1)) ** 2
- rho_h2 = ((b2_y2 - b2_y1) - (b1_y2 - b1_y1)) ** 2
- cw2 = cw ** 2 + eps
- ch2 = ch ** 2 + eps
- return iou - (rho2 / c2 + rho_w2 / cw2 + rho_h2 / ch2) # EIoU
- elif SIoU:
- # SIoU Loss https://arxiv.org/pdf/2205.12740.pdf
- s_cw = (b2_x1 + b2_x2 - b1_x1 - b1_x2) * 0.5 + eps
- s_ch = (b2_y1 + b2_y2 - b1_y1 - b1_y2) * 0.5 + eps
- sigma = torch.pow(s_cw ** 2 + s_ch ** 2, 0.5)
- sin_alpha_1 = torch.abs(s_cw) / sigma
- sin_alpha_2 = torch.abs(s_ch) / sigma
- threshold = pow(2, 0.5) / 2
- sin_alpha = torch.where(sin_alpha_1 > threshold, sin_alpha_2, sin_alpha_1)
- angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2)
- rho_x = (s_cw / cw) ** 2
- rho_y = (s_ch / ch) ** 2
- gamma = angle_cost - 2
- distance_cost = 2 - torch.exp(gamma * rho_x) - torch.exp(gamma * rho_y)
- omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2)
- omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2)
- shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), 4) + torch.pow(1 - torch.exp(-1 * omiga_h), 4)
- return iou - 0.5 * (distance_cost + shape_cost) + eps # SIoU
- elif ShapeIoU:
- # Shape-Distance
- ww = 2 * torch.pow(w2, scale) / (torch.pow(w2, scale) + torch.pow(h2, scale))
- hh = 2 * torch.pow(h2, scale) / (torch.pow(w2, scale) + torch.pow(h2, scale))
- cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex width
- ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
- c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
- center_distance_x = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2) / 4
- center_distance_y = ((b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4
- center_distance = hh * center_distance_x + ww * center_distance_y
- distance = center_distance / c2
- # Shape-Shape
- omiga_w = hh * torch.abs(w1 - w2) / torch.max(w1, w2)
- omiga_h = ww * torch.abs(h1 - h2) / torch.max(h1, h2)
- shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w), 4) + torch.pow(1 - torch.exp(-1 * omiga_h), 4)
- return iou - distance - 0.5 * shape_cost
- return iou - rho2 / c2 # DIoU
- c_area = cw * ch + eps # convex area
- return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf
- return iou # IoU
- def soft_nms(bboxes, scores, iou_thresh=0.5, sigma=0.5, score_threshold=0.25):
- """
- Perform Soft-NMS, decaying the scores of overlapping boxes with a Gaussian instead of removing them outright.
- Args:
- bboxes (torch.Tensor): (N, 4) boxes in xyxy format.
- scores (torch.Tensor): (N, ) confidence scores, decayed in place.
- iou_thresh (float): IoU threshold above which a box's score is decayed.
- sigma (float): Gaussian decay parameter.
- score_threshold (float): Boxes whose decayed score falls below this value are discarded.
- Returns:
- (torch.Tensor): Indices of the kept boxes.
- """
- order = torch.arange(0, scores.size(0)).to(bboxes.device)
- keep = []
-
- while order.numel() > 0:
- if order.numel() == 1: # only one box left, keep it and stop
- keep.append(order[0])
- break
- i = order[0] # index of the current highest-scoring box
- keep.append(i)
-
- # IoU of the current box against all remaining boxes
- iou = bbox_iou_for_nms(bboxes[i:i+1], bboxes[order[1:]], GIoU=False, DIoU=False, CIoU=False, EIoU=False, SIoU=False, ShapeIoU=False, scale=0.0).view(-1)
-
- # Gaussian decay of the scores of boxes overlapping the current box by more than iou_thresh
- idx = (iou > iou_thresh).nonzero().view(-1)
- if idx.numel() > 0:
- newScores = torch.exp(-torch.pow(iou[idx], 2) / sigma)
- scores[order[idx + 1]] *= newScores
-
- # Keep only boxes whose decayed score is still above score_threshold, highest-scoring box first
- newOrder = (scores[order[1:]] > score_threshold).nonzero().view(-1)
- if newOrder.numel() == 0:
- break
- maxScoreIndex = torch.argmax(scores[order[newOrder + 1]])
- if maxScoreIndex != 0:
- newOrder[[0, maxScoreIndex], ] = newOrder[[maxScoreIndex, 0], ]
- order = order[newOrder + 1]
-
- return torch.stack(keep) if keep else torch.zeros(0, dtype=torch.long, device=bboxes.device)
- def non_max_suppression(
- prediction,
- conf_thres=0.25,
- iou_thres=0.45,
- classes=None,
- agnostic=False,
- multi_label=False,
- labels=(),
- max_det=300,
- nc=0, # number of classes (optional)
- max_time_img=0.05,
- max_nms=30000,
- max_wh=7680,
- in_place=True,
- rotated=False,
- ):
- """
- Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box.
- Args:
- prediction (torch.Tensor): A tensor of shape (batch_size, num_classes + 4 + num_masks, num_boxes)
- containing the predicted boxes, classes, and masks. The tensor should be in the format
- output by a model, such as YOLO.
- conf_thres (float): The confidence threshold below which boxes will be filtered out.
- Valid values are between 0.0 and 1.0.
- iou_thres (float): The IoU threshold below which boxes will be filtered out during NMS.
- Valid values are between 0.0 and 1.0.
- classes (List[int]): A list of class indices to consider. If None, all classes will be considered.
- agnostic (bool): If True, the model is agnostic to the number of classes, and all
- classes will be considered as one.
- multi_label (bool): If True, each box may have multiple labels.
- labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner
- list contains the apriori labels for a given image. The list should be in the format
- output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2).
- max_det (int): The maximum number of boxes to keep after NMS.
- nc (int, optional): The number of classes output by the model. Any indices after this will be considered masks.
- max_time_img (float): The maximum time (seconds) for processing one image.
- max_nms (int): The maximum number of boxes into torchvision.ops.nms().
- max_wh (int): The maximum box width and height in pixels.
- in_place (bool): If True, the input prediction tensor will be modified in place.
- rotated (bool): Whether Oriented Bounding Boxes (OBB) are being passed for NMS.
- Returns:
- (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of
- shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns
- (x1, y1, x2, y2, confidence, class, mask1, mask2, ...).
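- Example:
- A minimal sketch on random predictions shaped like an 80-class detection head (values are random, so the kept boxes are meaningless):
- ```python
- import torch
- preds = torch.rand(1, 84, 8400)  # (batch, 4 box coords + 80 class scores, anchors)
- out = non_max_suppression(preds, conf_thres=0.25, iou_thres=0.45)
- out[0].shape  # (num_det, 6): x1, y1, x2, y2, confidence, class
- ```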
- """
- import torchvision # scope for faster 'import ultralytics'
- # Checks
- assert 0 <= conf_thres <= 1, f"Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0"
- assert 0 <= iou_thres <= 1, f"Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0"
- if isinstance(prediction, (list, tuple)): # YOLOv8 model in validation mode, output = (inference_out, loss_out)
- prediction = prediction[0] # select only inference output
- if classes is not None:
- classes = torch.tensor(classes, device=prediction.device)
- if prediction.shape[-1] == 6: # end-to-end model (BNC, i.e. 1,300,6)
- output = [pred[pred[:, 4] > conf_thres] for pred in prediction]
- if classes is not None:
- output = [pred[(pred[:, 5:6] == classes).any(1)] for pred in output]
- return output
- bs = prediction.shape[0] # batch size (BCN, i.e. 1,84,6300)
- nc = nc or (prediction.shape[1] - 4) # number of classes
- nm = prediction.shape[1] - nc - 4 # number of masks
- mi = 4 + nc # mask start index
- xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates
- # Settings
- # min_wh = 2 # (pixels) minimum box width and height
- time_limit = 2.0 + max_time_img * bs # seconds to quit after
- multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
- prediction = prediction.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84)
- if not rotated:
- if in_place:
- prediction[..., :4] = xywh2xyxy(prediction[..., :4]) # xywh to xyxy
- else:
- prediction = torch.cat((xywh2xyxy(prediction[..., :4]), prediction[..., 4:]), dim=-1) # xywh to xyxy
- t = time.time()
- output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
- for xi, x in enumerate(prediction): # image index, image inference
- # Apply constraints
- # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height
- x = x[xc[xi]] # confidence
- # Cat apriori labels if autolabelling
- if labels and len(labels[xi]) and not rotated:
- lb = labels[xi]
- v = torch.zeros((len(lb), nc + nm + 4), device=x.device)
- v[:, :4] = xywh2xyxy(lb[:, 1:5]) # box
- v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls
- x = torch.cat((x, v), 0)
- # If none remain process next image
- if not x.shape[0]:
- continue
- # Detections matrix nx6 (xyxy, conf, cls)
- box, cls, mask = x.split((4, nc, nm), 1)
- if multi_label:
- i, j = torch.where(cls > conf_thres)
- x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1)
- else: # best class only
- conf, j = cls.max(1, keepdim=True)
- x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]
- # Filter by class
- if classes is not None:
- x = x[(x[:, 5:6] == classes).any(1)]
- # Check shape
- n = x.shape[0] # number of boxes
- if not n: # no boxes
- continue
- if n > max_nms: # excess boxes
- x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes
- # Batched NMS
- c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
- scores = x[:, 4] # scores
- if rotated:
- boxes = torch.cat((x[:, :2] + c, x[:, 2:4], x[:, -1:]), dim=-1) # xywhr
- i = nms_rotated(boxes, scores, iou_thres)
- else:
- boxes = x[:, :4] + c # boxes (offset by class)
- i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
- # i = soft_nms(boxes, scores, iou_thres) # Soft-NMS
- i = i[:max_det] # limit detections
- # # Experimental
- # merge = False # use merge-NMS
- # if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
- # # Update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
- # from .metrics import box_iou
- # iou = box_iou(boxes[i], boxes) > iou_thres # IoU matrix
- # weights = iou * scores[None] # box weights
- # x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
- # redundant = True # require redundant detections
- # if redundant:
- # i = i[iou.sum(1) > 1] # require redundancy
- output[xi] = x[i]
- if (time.time() - t) > time_limit:
- LOGGER.warning(f"WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded")
- break # time limit exceeded
- return output
- def clip_boxes(boxes, shape):
- """
- Takes a list of bounding boxes and a shape (height, width) and clips the bounding boxes to the shape.
- Args:
- boxes (torch.Tensor): the bounding boxes to clip
- shape (tuple): the shape of the image
- Returns:
- (torch.Tensor | numpy.ndarray): Clipped boxes
- """
- if isinstance(boxes, torch.Tensor): # faster individually (WARNING: inplace .clamp_() Apple MPS bug)
- boxes[..., 0] = boxes[..., 0].clamp(0, shape[1]) # x1
- boxes[..., 1] = boxes[..., 1].clamp(0, shape[0]) # y1
- boxes[..., 2] = boxes[..., 2].clamp(0, shape[1]) # x2
- boxes[..., 3] = boxes[..., 3].clamp(0, shape[0]) # y2
- else: # np.array (faster grouped)
- boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2
- boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2
- return boxes
- def clip_coords(coords, shape):
- """
- Clip line coordinates to the image boundaries.
- Args:
- coords (torch.Tensor | numpy.ndarray): A list of line coordinates.
- shape (tuple): A tuple of integers representing the size of the image in the format (height, width).
- Returns:
- (torch.Tensor | numpy.ndarray): Clipped coordinates
- """
- if isinstance(coords, torch.Tensor): # faster individually (WARNING: inplace .clamp_() Apple MPS bug)
- coords[..., 0] = coords[..., 0].clamp(0, shape[1]) # x
- coords[..., 1] = coords[..., 1].clamp(0, shape[0]) # y
- else: # np.array (faster grouped)
- coords[..., 0] = coords[..., 0].clip(0, shape[1]) # x
- coords[..., 1] = coords[..., 1].clip(0, shape[0]) # y
- return coords
- def scale_image(masks, im0_shape, ratio_pad=None):
- """
- Takes a mask, and resizes it to the original image size.
- Args:
- masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3].
- im0_shape (tuple): the original image shape
- ratio_pad (tuple): the ratio of the padding to the original image.
- Returns:
- masks (np.ndarray): The rescaled masks, with shape [h, w, num].
- """
- # Rescale coordinates (xyxy) from im1_shape to im0_shape
- im1_shape = masks.shape
- if im1_shape[:2] == im0_shape[:2]:
- return masks
- if ratio_pad is None: # calculate from im0_shape
- gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
- pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
- else:
- # gain = ratio_pad[0][0]
- pad = ratio_pad[1]
- top, left = int(pad[1]), int(pad[0]) # y, x
- bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
- if len(masks.shape) < 2:
- raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
- masks = masks[top:bottom, left:right]
- masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
- if len(masks.shape) == 2:
- masks = masks[:, :, None]
- return masks
- def xyxy2xywh(x):
- """
- Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format where (x1, y1) is the
- top-left corner and (x2, y2) is the bottom-right corner.
- Args:
- x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
- Returns:
- y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height) format.
- """
- assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
- y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy
- y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center
- y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center
- y[..., 2] = x[..., 2] - x[..., 0] # width
- y[..., 3] = x[..., 3] - x[..., 1] # height
- return y
- def xywh2xyxy(x):
- """
- Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the
- top-left corner and (x2, y2) is the bottom-right corner. Note: ops per 2 channels faster than per channel.
- Args:
- x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format.
- Returns:
- y (np.ndarray | torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format.
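- Example:
- Converting a single center-format box to corner format:
- ```python
- import torch
- x = torch.tensor([[50.0, 50.0, 20.0, 10.0]])  # center x, center y, width, height
- xywh2xyxy(x)  # tensor([[40., 45., 60., 55.]])
- ```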
- """
- assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
- y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy
- xy = x[..., :2] # centers
- wh = x[..., 2:] / 2 # half width-height
- y[..., :2] = xy - wh # top left xy
- y[..., 2:] = xy + wh # bottom right xy
- return y
- def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
- """
- Convert normalized bounding box coordinates to pixel coordinates.
- Args:
- x (np.ndarray | torch.Tensor): The bounding box coordinates.
- w (int): Width of the image. Defaults to 640
- h (int): Height of the image. Defaults to 640
- padw (int): Padding width. Defaults to 0
- padh (int): Padding height. Defaults to 0
- Returns:
- y (np.ndarray | torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where
- x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box.
- """
- assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
- y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy
- y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x
- y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y
- y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x
- y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y
- return y
- def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
- """
- Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. x, y,
- width and height are normalized to image dimensions.
- Args:
- x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format.
- w (int): The width of the image. Defaults to 640
- h (int): The height of the image. Defaults to 640
- clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False
- eps (float): The minimum value of the box's width and height. Defaults to 0.0
- Returns:
- y (np.ndarray | torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format
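- Example:
- A minimal sketch normalizing one corner-format box to a 640x480 image:
- ```python
- import numpy as np
- x = np.array([[40.0, 45.0, 60.0, 55.0]])
- xyxy2xywhn(x, w=640, h=480)  # approx. [[0.0781, 0.1042, 0.0312, 0.0208]]
- ```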
- """
- if clip:
- x = clip_boxes(x, (h - eps, w - eps))
- assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
- y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x) # faster than clone/copy
- y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center
- y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center
- y[..., 2] = (x[..., 2] - x[..., 0]) / w # width
- y[..., 3] = (x[..., 3] - x[..., 1]) / h # height
- return y
- def xywh2ltwh(x):
- """
- Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates.
- Args:
- x (np.ndarray | torch.Tensor): The input tensor with the bounding box coordinates in the xywh format
- Returns:
- y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format
- """
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
- y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x
- y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y
- return y
- def xyxy2ltwh(x):
- """
- Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right.
- Args:
- x (np.ndarray | torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format
- Returns:
- y (np.ndarray | torch.Tensor): The bounding box coordinates in the xyltwh format.
- """
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
- y[..., 2] = x[..., 2] - x[..., 0] # width
- y[..., 3] = x[..., 3] - x[..., 1] # height
- return y
- def ltwh2xywh(x):
- """
- Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center.
- Args:
- x (torch.Tensor): the input tensor
- Returns:
- y (np.ndarray | torch.Tensor): The bounding box coordinates in the xywh format.
- """
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
- y[..., 0] = x[..., 0] + x[..., 2] / 2 # center x
- y[..., 1] = x[..., 1] + x[..., 3] / 2 # center y
- return y
- def xyxyxyxy2xywhr(x):
- """
- Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are
- returned in radians from 0 to pi/2.
- Args:
- x (numpy.ndarray | torch.Tensor): Input box corners [xy1, xy2, xy3, xy4] of shape (n, 8).
- Returns:
- (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5).
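- Example:
- A minimal sketch with one axis-aligned rectangle given as four corners (float32 is used here because cv2.minAreaRect expects float32 or int32 points):
- ```python
- import numpy as np
- corners = np.array([[0.0, 0.0, 10.0, 0.0, 10.0, 5.0, 0.0, 5.0]], dtype=np.float32)  # (1, 8)
- xyxyxyxy2xywhr(corners).shape  # (1, 5): cx, cy, w, h, rotation in radians
- ```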
- """
- is_torch = isinstance(x, torch.Tensor)
- points = x.cpu().numpy() if is_torch else x
- points = points.reshape(len(x), -1, 2)
- rboxes = []
- for pts in points:
- # NOTE: Use cv2.minAreaRect to get accurate xywhr,
- # especially some objects are cut off by augmentations in dataloader.
- (cx, cy), (w, h), angle = cv2.minAreaRect(pts)
- rboxes.append([cx, cy, w, h, angle / 180 * np.pi])
- return torch.tensor(rboxes, device=x.device, dtype=x.dtype) if is_torch else np.asarray(rboxes)
- def xywhr2xyxyxyxy(x):
- """
- Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values should
- be in radians from 0 to pi/2.
- Args:
- x (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
- Returns:
- (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
- """
- cos, sin, cat, stack = (
- (torch.cos, torch.sin, torch.cat, torch.stack)
- if isinstance(x, torch.Tensor)
- else (np.cos, np.sin, np.concatenate, np.stack)
- )
- ctr = x[..., :2]
- w, h, angle = (x[..., i : i + 1] for i in range(2, 5))
- cos_value, sin_value = cos(angle), sin(angle)
- vec1 = [w / 2 * cos_value, w / 2 * sin_value]
- vec2 = [-h / 2 * sin_value, h / 2 * cos_value]
- vec1 = cat(vec1, -1)
- vec2 = cat(vec2, -1)
- pt1 = ctr + vec1 + vec2
- pt2 = ctr + vec1 - vec2
- pt3 = ctr - vec1 - vec2
- pt4 = ctr - vec1 + vec2
- return stack([pt1, pt2, pt3, pt4], -2)
- def ltwh2xyxy(x):
- """
- It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right.
- Args:
- x (np.ndarray | torch.Tensor): the input bounding box coordinates in the ltwh format
- Returns:
- y (np.ndarray | torch.Tensor): the xyxy coordinates of the bounding boxes.
- """
- y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
- y[..., 2] = x[..., 2] + x[..., 0] # bottom right x
- y[..., 3] = x[..., 3] + x[..., 1] # bottom right y
- return y
- def segments2boxes(segments):
- """
- It converts segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
- Args:
- segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates
- Returns:
- (np.ndarray): the xywh coordinates of the bounding boxes.
- """
- boxes = []
- for s in segments:
- x, y = s.T # segment xy
- boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy
- return xyxy2xywh(np.array(boxes)) # cls, xywh
- def resample_segments(segments, n=1000):
- """
- Inputs a list of segments (each an (m, 2) array) and returns a list of segments up-sampled to n points each.
- Args:
- segments (list): a list of (n,2) arrays, where n is the number of points in the segment.
- n (int): number of points to resample the segment to. Defaults to 1000
- Returns:
- segments (list): the resampled segments.
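- Example:
- A minimal sketch up-sampling one 3-point segment to 100 points:
- ```python
- import numpy as np
- segs = [np.array([[0.0, 0.0], [10.0, 0.0], [10.0, 10.0]], dtype=np.float32)]
- resample_segments(segs, n=100)[0].shape  # (100, 2); note the input list is modified in place
- ```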
- """
- for i, s in enumerate(segments):
- s = np.concatenate((s, s[0:1, :]), axis=0)
- x = np.linspace(0, len(s) - 1, n)
- xp = np.arange(len(s))
- segments[i] = (
- np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)], dtype=np.float32).reshape(2, -1).T
- ) # segment xy
- return segments
- def crop_mask(masks, boxes):
- """
- It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box.
- Args:
- masks (torch.Tensor): [n, h, w] tensor of masks
- boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form
- Returns:
- (torch.Tensor): The masks are being cropped to the bounding box.
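- Example:
- A minimal sketch zeroing mask pixels outside each detection box (values are illustrative):
- ```python
- import torch
- masks = torch.ones(2, 160, 160)
- boxes = torch.tensor([[10.0, 10.0, 50.0, 50.0], [0.0, 0.0, 160.0, 160.0]])
- crop_mask(masks, boxes).shape  # (2, 160, 160); pixels outside each box are set to 0
- ```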
- """
- _, h, w = masks.shape
- x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1)
- r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w)
- c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1)
- return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
- def process_mask_upsample(protos, masks_in, bboxes, shape):
- """
- Takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher quality
- but is slower.
- Args:
- protos (torch.Tensor): [mask_dim, mask_h, mask_w]
- masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms
- bboxes (torch.Tensor): [n, 4], n is number of masks after nms
- shape (tuple): the size of the input image (h,w)
- Returns:
- (torch.Tensor): The upsampled masks.
- """
- c, mh, mw = protos.shape # CHW
- masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
- masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
- masks = crop_mask(masks, bboxes) # CHW
- return masks.gt_(0.0)
- def process_mask(protos, masks_in, bboxes, shape, upsample=False):
- """
- Apply masks to bounding boxes using the output of the mask head.
- Args:
- protos (torch.Tensor): A tensor of shape [mask_dim, mask_h, mask_w].
- masks_in (torch.Tensor): A tensor of shape [n, mask_dim], where n is the number of masks after NMS.
- bboxes (torch.Tensor): A tensor of shape [n, 4], where n is the number of masks after NMS.
- shape (tuple): A tuple of integers representing the size of the input image in the format (h, w).
- upsample (bool): A flag to indicate whether to upsample the mask to the original image size. Default is False.
- Returns:
- (torch.Tensor): A binary mask tensor of shape [n, h, w], where n is the number of masks after NMS, and h and w
- are the height and width of the input image. The mask is applied to the bounding boxes.
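- Example:
- A minimal sketch with random prototypes and coefficients for two detections kept after NMS (shapes follow the Args above; values are random):
- ```python
- import torch
- protos = torch.rand(32, 160, 160)  # mask prototypes
- masks_in = torch.rand(2, 32)  # mask coefficients for 2 detections
- bboxes = torch.tensor([[10.0, 10.0, 300.0, 300.0], [50.0, 80.0, 400.0, 500.0]])
- process_mask(protos, masks_in, bboxes, shape=(640, 640), upsample=True).shape  # (2, 640, 640)
- ```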
- """
- c, mh, mw = protos.shape # CHW
- ih, iw = shape
- masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw) # CHW
- width_ratio = mw / iw
- height_ratio = mh / ih
- downsampled_bboxes = bboxes.clone()
- downsampled_bboxes[:, 0] *= width_ratio
- downsampled_bboxes[:, 2] *= width_ratio
- downsampled_bboxes[:, 3] *= height_ratio
- downsampled_bboxes[:, 1] *= height_ratio
- masks = crop_mask(masks, downsampled_bboxes) # CHW
- if upsample:
- masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
- return masks.gt_(0.0)
- def process_mask_native(protos, masks_in, bboxes, shape):
- """
- It takes the output of the mask head, and crops it after upsampling to the bounding boxes.
- Args:
- protos (torch.Tensor): [mask_dim, mask_h, mask_w]
- masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms
- bboxes (torch.Tensor): [n, 4], n is number of masks after nms
- shape (tuple): the size of the input image (h,w)
- Returns:
- masks (torch.Tensor): The returned masks with dimensions [n, h, w]
- """
- c, mh, mw = protos.shape # CHW
- masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
- masks = scale_masks(masks[None], shape)[0] # CHW
- masks = crop_mask(masks, bboxes) # CHW
- return masks.gt_(0.0)
- def scale_masks(masks, shape, padding=True):
- """
- Rescale segment masks to shape.
- Args:
- masks (torch.Tensor): (N, C, H, W).
- shape (tuple): Height and width.
- padding (bool): If True, assume the masks are based on an image letterboxed by YOLO-style augmentation. If False,
- do regular rescaling.
- """
- mh, mw = masks.shape[2:]
- gain = min(mh / shape[0], mw / shape[1]) # gain = old / new
- pad = [mw - shape[1] * gain, mh - shape[0] * gain] # wh padding
- if padding:
- pad[0] /= 2
- pad[1] /= 2
- top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0) # y, x
- bottom, right = (int(mh - pad[1]), int(mw - pad[0]))
- masks = masks[..., top:bottom, left:right]
- masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False) # NCHW
- return masks
- def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False, padding=True):
- """
- Rescale segment coordinates (xy) from img1_shape to img0_shape.
- Args:
- img1_shape (tuple): The shape of the image that the coords are from.
- coords (torch.Tensor): the coords to be scaled of shape n,2.
- img0_shape (tuple): the shape of the image that the segmentation is being applied to.
- ratio_pad (tuple): the ratio of the image size to the padded image size.
- normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False.
- padding (bool): If True, assume the coords are based on an image letterboxed by YOLO-style augmentation. If False,
- do regular rescaling.
- Returns:
- coords (torch.Tensor): The scaled coordinates.
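- Example:
- A minimal sketch mapping points from a 640x640 letterboxed input back to a 480x640 original image (values are illustrative):
- ```python
- import torch
- coords = torch.tensor([[320.0, 240.0], [100.0, 80.0]])
- scale_coords((640, 640), coords, (480, 640))  # tensor([[320., 160.], [100., 0.]])
- ```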
- """
- if ratio_pad is None: # calculate from img0_shape
- gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
- pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
- else:
- gain = ratio_pad[0][0]
- pad = ratio_pad[1]
- if padding:
- coords[..., 0] -= pad[0] # x padding
- coords[..., 1] -= pad[1] # y padding
- coords[..., 0] /= gain
- coords[..., 1] /= gain
- coords = clip_coords(coords, img0_shape)
- if normalize:
- coords[..., 0] /= img0_shape[1] # width
- coords[..., 1] /= img0_shape[0] # height
- return coords
- def regularize_rboxes(rboxes):
- """
- Regularize rotated boxes in range [0, pi/2].
- Args:
- rboxes (torch.Tensor): Input boxes of shape(N, 5) in xywhr format.
- Returns:
- (torch.Tensor): The regularized boxes.
- """
- x, y, w, h, t = rboxes.unbind(dim=-1)
- # Swap edge and angle if h >= w
- w_ = torch.where(w > h, w, h)
- h_ = torch.where(w > h, h, w)
- t = torch.where(w > h, t, t + math.pi / 2) % math.pi
- return torch.stack([x, y, w_, h_, t], dim=-1) # regularized boxes
- def masks2segments(masks, strategy="largest"):
- """
- It takes a list of masks(n,h,w) and returns a list of segments(n,xy)
- Args:
- masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160)
- strategy (str): 'concat' or 'largest'. Defaults to largest
- Returns:
- segments (List): list of segment masks
- """
- segments = []
- for x in masks.int().cpu().numpy().astype("uint8"):
- c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
- if c:
- if strategy == "concat": # concatenate all segments
- c = np.concatenate([x.reshape(-1, 2) for x in c])
- elif strategy == "largest": # select largest segment
- c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)
- else:
- c = np.zeros((0, 2)) # no segments found
- segments.append(c.astype("float32"))
- return segments
- def convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray:
- """
- Convert a batch of FP32 torch tensors (0.0-1.0) to a NumPy uint8 array (0-255), changing from BCHW to BHWC layout.
- Args:
- batch (torch.Tensor): Input tensor batch of shape (Batch, Channels, Height, Width) and dtype torch.float32.
- Returns:
- (np.ndarray): Output NumPy array batch of shape (Batch, Height, Width, Channels) and dtype uint8.
- """
- return (batch.permute(0, 2, 3, 1).contiguous() * 255).clamp(0, 255).to(torch.uint8).cpu().numpy()
- def clean_str(s):
- """
- Cleans a string by replacing special characters with underscore _
- Args:
- s (str): a string needing special characters replaced
- Returns:
- (str): a string with special characters replaced by an underscore _
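- Example:
- Replacing the special characters in a stream or file name:
- ```python
- clean_str("my video#1?.mp4")  # 'my video_1_.mp4'
- ```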
- """
- return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
|