Add YOLOv10 and modify pipeline

王庆刚
2025-03-28 13:19:54 +08:00
parent 183299c06b
commit 798c596acc
471 changed files with 19109 additions and 7342 deletions


@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""
Ultralytics Results, Boxes and Masks classes for handling inference results
Ultralytics Results, Boxes and Masks classes for handling inference results.
Usage: See https://docs.ultralytics.com/modes/predict/
"""
@@ -13,17 +13,17 @@ import numpy as np
import torch
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER, SimpleClass, deprecation_warn, ops
from ultralytics.utils import LOGGER, SimpleClass, ops
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from ultralytics.utils.torch_utils import smart_inference_mode
class BaseTensor(SimpleClass):
"""
Base tensor class with additional methods for easy manipulation and device handling.
"""
"""Base tensor class with additional methods for easy manipulation and device handling."""
def __init__(self, data, orig_shape) -> None:
"""Initialize BaseTensor with data and original shape.
"""
Initialize BaseTensor with data and original shape.
Args:
data (torch.Tensor | np.ndarray): Predictions, such as bboxes, masks and keypoints.
@@ -67,45 +67,63 @@ class Results(SimpleClass):
"""
A class for storing and manipulating inference results.
Args:
orig_img (numpy.ndarray): The original image as a numpy array.
path (str): The path to the image file.
names (dict): A dictionary of class names.
boxes (torch.tensor, optional): A 2D tensor of bounding box coordinates for each detection.
masks (torch.tensor, optional): A 3D tensor of detection masks, where each mask is a binary image.
probs (torch.tensor, optional): A 1D tensor of probabilities of each class for classification task.
keypoints (List[List[float]], optional): A list of detected keypoints for each object.
Attributes:
orig_img (numpy.ndarray): The original image as a numpy array.
orig_shape (tuple): The original image shape in (height, width) format.
boxes (Boxes, optional): A Boxes object containing the detection bounding boxes.
masks (Masks, optional): A Masks object containing the detection masks.
probs (Probs, optional): A Probs object containing probabilities of each class for classification task.
keypoints (Keypoints, optional): A Keypoints object containing detected keypoints for each object.
speed (dict): A dictionary of preprocess, inference, and postprocess speeds in milliseconds per image.
names (dict): A dictionary of class names.
path (str): The path to the image file.
_keys (tuple): A tuple of attribute names for non-empty attributes.
orig_img (numpy.ndarray): Original image as a numpy array.
orig_shape (tuple): Original image shape in (height, width) format.
boxes (Boxes, optional): Object containing detection bounding boxes.
masks (Masks, optional): Object containing detection masks.
probs (Probs, optional): Object containing class probabilities for classification tasks.
keypoints (Keypoints, optional): Object containing detected keypoints for each object.
speed (dict): Dictionary of preprocess, inference, and postprocess speeds (ms/image).
names (dict): Dictionary of class names.
path (str): Path to the image file.
Methods:
update(boxes=None, masks=None, probs=None, obb=None): Updates object attributes with new detection results.
cpu(): Returns a copy of the Results object with all tensors on CPU memory.
numpy(): Returns a copy of the Results object with all tensors as numpy arrays.
cuda(): Returns a copy of the Results object with all tensors on GPU memory.
to(*args, **kwargs): Returns a copy of the Results object with tensors on a specified device and dtype.
new(): Returns a new Results object with the same image, path, and names.
plot(...): Plots detection results on an input image, returning an annotated image.
show(): Show annotated results to screen.
save(filename): Save annotated results to file.
verbose(): Returns a log string for each task, detailing detections and classifications.
save_txt(txt_file, save_conf=False): Saves detection results to a text file.
save_crop(save_dir, file_name=Path("im.jpg")): Saves cropped detection images.
tojson(normalize=False): Converts detection results to JSON format.
"""
def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None) -> None:
"""Initialize the Results class."""
def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None, keypoints=None, obb=None) -> None:
"""
Initialize the Results class.
Args:
orig_img (numpy.ndarray): The original image as a numpy array.
path (str): The path to the image file.
names (dict): A dictionary of class names.
boxes (torch.tensor, optional): A 2D tensor of bounding box coordinates for each detection.
masks (torch.tensor, optional): A 3D tensor of detection masks, where each mask is a binary image.
probs (torch.tensor, optional): A 1D tensor of probabilities of each class for classification task.
keypoints (torch.tensor, optional): A 2D tensor of keypoint coordinates for each detection.
obb (torch.tensor, optional): A 2D tensor of oriented bounding box coordinates for each detection.
"""
self.orig_img = orig_img
self.orig_shape = orig_img.shape[:2]
self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None # native size boxes
self.masks = Masks(masks, self.orig_shape) if masks is not None else None # native size or imgsz masks
self.probs = Probs(probs) if probs is not None else None
self.keypoints = Keypoints(keypoints, self.orig_shape) if keypoints is not None else None
self.speed = {'preprocess': None, 'inference': None, 'postprocess': None} # milliseconds per image
self.obb = OBB(obb, self.orig_shape) if obb is not None else None
self.speed = {"preprocess": None, "inference": None, "postprocess": None} # milliseconds per image
self.names = names
self.path = path
self.save_dir = None
self._keys = 'boxes', 'masks', 'probs', 'keypoints'
self._keys = "boxes", "masks", "probs", "keypoints", "obb"
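A minimal usage sketch (not part of this diff) of how Results objects are normally produced and read; the weight file and image path below are placeholders for whatever is available locally:

from ultralytics import YOLO

model = YOLO("yolov8n.pt")        # any detection checkpoint works here
results = model("bus.jpg")        # inference returns a list of Results, one per image
r = results[0]
print(r.boxes.xyxy)               # (N, 4) box corners in pixels
print(r.boxes.conf, r.boxes.cls)  # per-box confidence scores and class indices
print(r.speed)                    # preprocess/inference/postprocess times in ms
if len(r):
    print(r.names[int(r.boxes.cls[0])])  # class name of the first detection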
def __getitem__(self, idx):
"""Return a Results object for the specified index."""
return self._apply('__getitem__', idx)
return self._apply("__getitem__", idx)
def __len__(self):
"""Return the number of detections in the Results object."""
@@ -114,17 +132,30 @@ class Results(SimpleClass):
if v is not None:
return len(v)
def update(self, boxes=None, masks=None, probs=None):
def update(self, boxes=None, masks=None, probs=None, obb=None):
"""Update the boxes, masks, and probs attributes of the Results object."""
if boxes is not None:
ops.clip_boxes(boxes, self.orig_shape) # clip boxes
self.boxes = Boxes(boxes, self.orig_shape)
self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape)
if masks is not None:
self.masks = Masks(masks, self.orig_shape)
if probs is not None:
self.probs = probs
if obb is not None:
self.obb = OBB(obb, self.orig_shape)
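A hedged sketch of update() on a hand-built Results object; the ultralytics.engine.results import path and the tensor values are assumptions for illustration, and the box layout follows the (x1, y1, x2, y2, conf, cls) shape that Boxes expects:

import numpy as np
import torch
from ultralytics.engine.results import Results  # assumed module path for this file

img = np.zeros((480, 640, 3), dtype=np.uint8)                         # dummy 640x480 BGR image
r = Results(orig_img=img, path="dummy.jpg", names={0: "person"})
r.update(boxes=torch.tensor([[50.0, 40.0, 700.0, 180.0, 0.9, 0.0]]))  # x2=700 gets clipped to 640
print(r.boxes.xyxy)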
def _apply(self, fn, *args, **kwargs):
"""
Applies a function to all non-empty attributes and returns a new Results object with modified attributes. This
function is internally called by methods like .to(), .cuda(), .cpu(), etc.
Args:
fn (str): The name of the function to apply.
*args: Variable length argument list to pass to the function.
**kwargs: Arbitrary keyword arguments to pass to the function.
Returns:
Results: A new Results object with attributes modified by the applied function.
"""
r = self.new()
for k in self._keys:
v = getattr(self, k)
@@ -134,40 +165,42 @@ class Results(SimpleClass):
def cpu(self):
"""Return a copy of the Results object with all tensors on CPU memory."""
return self._apply('cpu')
return self._apply("cpu")
def numpy(self):
"""Return a copy of the Results object with all tensors as numpy arrays."""
return self._apply('numpy')
return self._apply("numpy")
def cuda(self):
"""Return a copy of the Results object with all tensors on GPU memory."""
return self._apply('cuda')
return self._apply("cuda")
def to(self, *args, **kwargs):
"""Return a copy of the Results object with tensors on the specified device and dtype."""
return self._apply('to', *args, **kwargs)
return self._apply("to", *args, **kwargs)
def new(self):
"""Return a new Results object with the same image, path, and names."""
return Results(orig_img=self.orig_img, path=self.path, names=self.names)
def plot(
self,
conf=True,
line_width=None,
font_size=None,
font='Arial.ttf',
pil=False,
img=None,
im_gpu=None,
kpt_radius=5,
kpt_line=True,
labels=True,
boxes=True,
masks=True,
probs=True,
**kwargs # deprecated args TODO: remove support in 8.2
self,
conf=True,
line_width=None,
font_size=None,
font="Arial.ttf",
pil=False,
img=None,
im_gpu=None,
kpt_radius=5,
kpt_line=True,
labels=True,
boxes=True,
masks=True,
probs=True,
show=False,
save=False,
filename=None,
):
"""
Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image.
@@ -186,6 +219,9 @@ class Results(SimpleClass):
boxes (bool): Whether to plot the bounding boxes.
masks (bool): Whether to plot the masks.
probs (bool): Whether to plot classification probability
show (bool): Whether to display the annotated image directly.
save (bool): Whether to save the annotated image to `filename`.
filename (str): Filename to save image to if save is True.
Returns:
(numpy.ndarray): A numpy array of the annotated image.
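A hedged sketch of plot(), including the show/save/filename arguments added in this commit; file names are placeholders:

import cv2
from ultralytics import YOLO

r = YOLO("yolov8n.pt")("bus.jpg")[0]
im = r.plot(conf=True, line_width=2)          # annotated image as a numpy array (BGR for cv2 inputs)
cv2.imwrite("annotated.jpg", im)
r.plot(show=True)                             # display directly
r.plot(save=True, filename="annotated2.jpg")  # save from inside plot()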
@@ -207,19 +243,9 @@ class Results(SimpleClass):
if img is None and isinstance(self.orig_img, torch.Tensor):
img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).to(torch.uint8).cpu().numpy()
# Deprecation warn TODO: remove in 8.2
if 'show_conf' in kwargs:
deprecation_warn('show_conf', 'conf')
conf = kwargs['show_conf']
assert isinstance(conf, bool), '`show_conf` should be of boolean type, i.e, show_conf=True/False'
if 'line_thickness' in kwargs:
deprecation_warn('line_thickness', 'line_width')
line_width = kwargs['line_thickness']
assert isinstance(line_width, int), '`line_width` should be of int type, i.e, line_width=3'
names = self.names
pred_boxes, show_boxes = self.boxes, boxes
is_obb = self.obb is not None
pred_boxes, show_boxes = self.obb if is_obb else self.boxes, boxes
pred_masks, show_masks = self.masks, masks
pred_probs, show_probs = self.probs, probs
annotator = Annotator(
@@ -228,28 +254,35 @@ class Results(SimpleClass):
font_size,
font,
pil or (pred_probs is not None and show_probs), # Classify tasks default to pil=True
example=names)
example=names,
)
# Plot Segment results
if pred_masks and show_masks:
if im_gpu is None:
img = LetterBox(pred_masks.shape[1:])(image=annotator.result())
im_gpu = torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device).permute(
2, 0, 1).flip(0).contiguous() / 255
im_gpu = (
torch.as_tensor(img, dtype=torch.float16, device=pred_masks.data.device)
.permute(2, 0, 1)
.flip(0)
.contiguous()
/ 255
)
idx = pred_boxes.cls if pred_boxes else range(len(pred_masks))
annotator.masks(pred_masks.data, colors=[colors(x, True) for x in idx], im_gpu=im_gpu)
# Plot Detect results
if pred_boxes and show_boxes:
if pred_boxes is not None and show_boxes:
for d in reversed(pred_boxes):
c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
name = ('' if id is None else f'id:{id} ') + names[c]
label = (f'{name} {conf:.2f}' if conf else name) if labels else None
annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
name = ("" if id is None else f"id:{id} ") + names[c]
label = (f"{name} {conf:.2f}" if conf else name) if labels else None
box = d.xyxyxyxy.reshape(-1, 4, 2).squeeze() if is_obb else d.xyxy.squeeze()
annotator.box_label(box, label, color=colors(c, True), rotated=is_obb)
# Plot Classify results
if pred_probs is not None and show_probs:
text = ',\n'.join(f'{names[j] if names else j} {pred_probs.data[j]:.2f}' for j in pred_probs.top5)
text = ",\n".join(f"{names[j] if names else j} {pred_probs.data[j]:.2f}" for j in pred_probs.top5)
x = round(self.orig_shape[0] * 0.03)
annotator.text([x, x], text, txt_color=(255, 255, 255)) # TODO: allow setting colors
@@ -258,17 +291,34 @@ class Results(SimpleClass):
for k in reversed(self.keypoints.data):
annotator.kpts(k, self.orig_shape, radius=kpt_radius, kpt_line=kpt_line)
# Show results
if show:
annotator.show(self.path)
# Save results
if save:
annotator.save(filename)
return annotator.result()
def show(self, *args, **kwargs):
"""Show annotated results image."""
self.plot(show=True, *args, **kwargs)
def save(self, filename=None, *args, **kwargs):
"""Save annotated results image."""
if not filename:
filename = f"results_{Path(self.path).name}"
self.plot(save=True, filename=filename, *args, **kwargs)
return filename
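A hedged sketch of the new show() and save() convenience wrappers:

from ultralytics import YOLO

r = YOLO("yolov8n.pt")("bus.jpg")[0]
r.show()                       # plot(show=True) under the hood
saved = r.save()               # defaults to "results_<image name>" and returns the filename
r.save(filename="custom.jpg")  # explicit output path
print(saved)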
def verbose(self):
"""
Return log string for each task.
"""
log_string = ''
"""Return log string for each task."""
log_string = ""
probs = self.probs
boxes = self.boxes
if len(self) == 0:
return log_string if probs is not None else f'{log_string}(no detections), '
return log_string if probs is not None else f"{log_string}(no detections), "
if probs is not None:
log_string += f"{', '.join(f'{self.names[j]} {probs.data[j]:.2f}' for j in probs.top5)}, "
if boxes:
@@ -285,34 +335,35 @@ class Results(SimpleClass):
txt_file (str): txt file path.
save_conf (bool): save confidence score or not.
"""
boxes = self.boxes
is_obb = self.obb is not None
boxes = self.obb if is_obb else self.boxes
masks = self.masks
probs = self.probs
kpts = self.keypoints
texts = []
if probs is not None:
# Classify
[texts.append(f'{probs.data[j]:.2f} {self.names[j]}') for j in probs.top5]
[texts.append(f"{probs.data[j]:.2f} {self.names[j]}") for j in probs.top5]
elif boxes:
# Detect/segment/pose
for j, d in enumerate(boxes):
c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item())
line = (c, *d.xywhn.view(-1))
line = (c, *(d.xyxyxyxyn.view(-1) if is_obb else d.xywhn.view(-1)))
if masks:
seg = masks[j].xyn[0].copy().reshape(-1) # reversed mask.xyn, (n,2) to (n*2)
line = (c, *seg)
if kpts is not None:
kpt = torch.cat((kpts[j].xyn, kpts[j].conf[..., None]), 2) if kpts[j].has_visible else kpts[j].xyn
line += (*kpt.reshape(-1).tolist(), )
line += (conf, ) * save_conf + (() if id is None else (id, ))
texts.append(('%g ' * len(line)).rstrip() % line)
line += (*kpt.reshape(-1).tolist(),)
line += (conf,) * save_conf + (() if id is None else (id,))
texts.append(("%g " * len(line)).rstrip() % line)
if texts:
Path(txt_file).parent.mkdir(parents=True, exist_ok=True) # make directory
with open(txt_file, 'a') as f:
f.writelines(text + '\n' for text in texts)
with open(txt_file, "a") as f:
f.writelines(text + "\n" for text in texts)
def save_crop(self, save_dir, file_name=Path('im.jpg')):
def save_crop(self, save_dir, file_name=Path("im.jpg")):
"""
Save cropped predictions to `save_dir/cls/file_name.jpg`.
@@ -321,79 +372,105 @@ class Results(SimpleClass):
file_name (str | pathlib.Path): File name.
"""
if self.probs is not None:
LOGGER.warning('WARNING ⚠️ Classify task do not support `save_crop`.')
LOGGER.warning("WARNING ⚠️ Classify task do not support `save_crop`.")
return
if self.obb is not None:
LOGGER.warning("WARNING ⚠️ OBB task do not support `save_crop`.")
return
for d in self.boxes:
save_one_box(d.xyxy,
self.orig_img.copy(),
file=Path(save_dir) / self.names[int(d.cls)] / f'{Path(file_name).stem}.jpg',
BGR=True)
save_one_box(
d.xyxy,
self.orig_img.copy(),
file=Path(save_dir) / self.names[int(d.cls)] / f"{Path(file_name)}.jpg",
BGR=True,
)
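A hedged sketch of save_crop(); note that classification and OBB results are skipped with a warning:

from ultralytics import YOLO

r = YOLO("yolov8n.pt")("bus.jpg")[0]
r.save_crop(save_dir="crops", file_name="bus")  # writes crops/<class name>/bus.jpg per detection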
def tojson(self, normalize=False):
"""Convert the object to JSON format."""
def summary(self, normalize=False, decimals=5):
"""Convert the results to a summarized format."""
if self.probs is not None:
LOGGER.warning('Warning: Classify task do not support `tojson` yet.')
LOGGER.warning("Warning: Classify results do not support the `summary()` method yet.")
return
import json
# Create list of detection dictionaries
results = []
data = self.boxes.data.cpu().tolist()
h, w = self.orig_shape if normalize else (1, 1)
for i, row in enumerate(data): # xyxy, track_id if tracking, conf, class_id
box = {'x1': row[0] / w, 'y1': row[1] / h, 'x2': row[2] / w, 'y2': row[3] / h}
conf = row[-2]
box = {
"x1": round(row[0] / w, decimals),
"y1": round(row[1] / h, decimals),
"x2": round(row[2] / w, decimals),
"y2": round(row[3] / h, decimals),
}
conf = round(row[-2], decimals)
class_id = int(row[-1])
name = self.names[class_id]
result = {'name': name, 'class': class_id, 'confidence': conf, 'box': box}
result = {"name": self.names[class_id], "class": class_id, "confidence": conf, "box": box}
if self.boxes.is_track:
result['track_id'] = int(row[-3]) # track ID
result["track_id"] = int(row[-3]) # track ID
if self.masks:
x, y = self.masks.xy[i][:, 0], self.masks.xy[i][:, 1] # numpy array
result['segments'] = {'x': (x / w).tolist(), 'y': (y / h).tolist()}
result["segments"] = {
"x": (self.masks.xy[i][:, 0] / w).round(decimals).tolist(),
"y": (self.masks.xy[i][:, 1] / h).round(decimals).tolist(),
}
if self.keypoints is not None:
x, y, visible = self.keypoints[i].data[0].cpu().unbind(dim=1) # torch Tensor
result['keypoints'] = {'x': (x / w).tolist(), 'y': (y / h).tolist(), 'visible': visible.tolist()}
result["keypoints"] = {
"x": (x / w).numpy().round(decimals).tolist(), # decimals named argument required
"y": (y / h).numpy().round(decimals).tolist(),
"visible": visible.numpy().round(decimals).tolist(),
}
results.append(result)
# Convert detections to JSON
return json.dumps(results, indent=2)
return results
def tojson(self, normalize=False, decimals=5):
"""Convert the results to JSON format."""
import json
return json.dumps(self.summary(normalize=normalize, decimals=decimals), indent=2)
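A hedged sketch of the new summary() helper and the tojson() wrapper built on top of it:

from ultralytics import YOLO

r = YOLO("yolov8n.pt")("bus.jpg")[0]
rows = r.summary(normalize=True, decimals=3)  # list of dicts: name, class, confidence, box, ...
for row in rows:
    print(row["name"], row["confidence"], row["box"])
print(r.tojson())                             # same data serialized via json.dumps(..., indent=2)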
class Boxes(BaseTensor):
"""
A class for storing and manipulating detection boxes.
Args:
boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes,
with shape (num_boxes, 6) or (num_boxes, 7). The last two columns contain confidence and class values.
If present, the third last column contains track IDs.
orig_shape (tuple): Original image size, in the format (height, width).
Manages detection boxes, providing easy access and manipulation of box coordinates, confidence scores, class
identifiers, and optional tracking IDs. Supports multiple formats for box coordinates, including both absolute and
normalized forms.
Attributes:
xyxy (torch.Tensor | numpy.ndarray): The boxes in xyxy format.
conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes.
cls (torch.Tensor | numpy.ndarray): The class values of the boxes.
id (torch.Tensor | numpy.ndarray): The track IDs of the boxes (if available).
xywh (torch.Tensor | numpy.ndarray): The boxes in xywh format.
xyxyn (torch.Tensor | numpy.ndarray): The boxes in xyxy format normalized by original image size.
xywhn (torch.Tensor | numpy.ndarray): The boxes in xywh format normalized by original image size.
data (torch.Tensor): The raw bboxes tensor (alias for `boxes`).
data (torch.Tensor): The raw tensor containing detection boxes and their associated data.
orig_shape (tuple): The original image size as a tuple (height, width), used for normalization.
is_track (bool): Indicates whether tracking IDs are included in the box data.
Properties:
xyxy (torch.Tensor | numpy.ndarray): Boxes in [x1, y1, x2, y2] format.
conf (torch.Tensor | numpy.ndarray): Confidence scores for each box.
cls (torch.Tensor | numpy.ndarray): Class labels for each box.
id (torch.Tensor | numpy.ndarray, optional): Tracking IDs for each box, if available.
xywh (torch.Tensor | numpy.ndarray): Boxes in [x, y, width, height] format, calculated on demand.
xyxyn (torch.Tensor | numpy.ndarray): Normalized [x1, y1, x2, y2] boxes, relative to `orig_shape`.
xywhn (torch.Tensor | numpy.ndarray): Normalized [x, y, width, height] boxes, relative to `orig_shape`.
Methods:
cpu(): Move the object to CPU memory.
numpy(): Convert the object to a numpy array.
cuda(): Move the object to CUDA memory.
to(*args, **kwargs): Move the object to the specified device.
cpu(): Moves the boxes to CPU memory.
numpy(): Converts the boxes to a numpy array format.
cuda(): Moves the boxes to CUDA (GPU) memory.
to(device, dtype=None): Moves the boxes to the specified device.
"""
def __init__(self, boxes, orig_shape) -> None:
"""Initialize the Boxes class."""
"""
Initialize the Boxes class.
Args:
boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes, with
shape (num_boxes, 6) or (num_boxes, 7). The last two columns contain confidence and class values.
If present, the third last column contains track IDs.
orig_shape (tuple): Original image size, in the format (height, width).
"""
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
assert n in (6, 7), f'expected `n` in [6, 7], but got {n}' # xyxy, track_id, conf, cls
assert n in (6, 7), f"expected 6 or 7 values but got {n}" # xyxy, track_id, conf, cls
super().__init__(boxes, orig_shape)
self.is_track = n == 7
self.orig_shape = orig_shape
@@ -442,19 +519,12 @@ class Boxes(BaseTensor):
xywh[..., [1, 3]] /= self.orig_shape[0]
return xywh
@property
def boxes(self):
"""Return the raw bboxes tensor (deprecated)."""
LOGGER.warning("WARNING ⚠️ 'Boxes.boxes' is deprecated. Use 'Boxes.data' instead.")
return self.data
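A hedged sketch of the Boxes accessors; track IDs only appear when using the tracking API:

from ultralytics import YOLO

boxes = YOLO("yolov8n.pt")("bus.jpg")[0].boxes
print(boxes.xyxy)   # (N, 4) absolute corner coordinates
print(boxes.xywhn)  # (N, 4) center/size, normalized by the original image shape
print(boxes.conf)   # (N,) confidence scores
print(boxes.cls)    # (N,) class indices
print(boxes.id)     # (N,) track IDs, or None outside of tracking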
class Masks(BaseTensor):
"""
A class for storing and manipulating detection masks.
Attributes:
segments (list): Deprecated property for segments (normalized).
xy (list): A list of segments in pixel coordinates.
xyn (list): A list of normalized segments.
@@ -471,22 +541,14 @@ class Masks(BaseTensor):
masks = masks[None, :]
super().__init__(masks, orig_shape)
@property
@lru_cache(maxsize=1)
def segments(self):
"""Return segments (normalized). Deprecated; use xyn property instead."""
LOGGER.warning(
"WARNING ⚠️ 'Masks.segments' is deprecated. Use 'Masks.xyn' for segments (normalized) and 'Masks.xy' for segments (pixels) instead."
)
return self.xyn
@property
@lru_cache(maxsize=1)
def xyn(self):
"""Return normalized segments."""
return [
ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=True)
for x in ops.masks2segments(self.data)]
for x in ops.masks2segments(self.data)
]
@property
@lru_cache(maxsize=1)
@@ -494,13 +556,8 @@ class Masks(BaseTensor):
"""Return segments in pixel coordinates."""
return [
ops.scale_coords(self.data.shape[1:], x, self.orig_shape, normalize=False)
for x in ops.masks2segments(self.data)]
@property
def masks(self):
"""Return the raw masks tensor. Deprecated; use data attribute instead."""
LOGGER.warning("WARNING ⚠️ 'Masks.masks' is deprecated. Use 'Masks.data' instead.")
return self.data
for x in ops.masks2segments(self.data)
]
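A hedged sketch of the Masks accessors, using a placeholder segmentation checkpoint:

from ultralytics import YOLO

masks = YOLO("yolov8n-seg.pt")("bus.jpg")[0].masks
if masks is not None:
    print(masks.data.shape)  # (N, H, W) binary mask tensor
    print(masks.xy[0][:3])   # first polygon, pixel coordinates
    print(masks.xyn[0][:3])  # first polygon, normalized coordinates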
class Keypoints(BaseTensor):
@@ -519,10 +576,14 @@ class Keypoints(BaseTensor):
to(device, dtype): Returns a copy of the keypoints tensor with the specified device and dtype.
"""
@smart_inference_mode() # avoid keypoints < conf in-place error
def __init__(self, keypoints, orig_shape) -> None:
"""Initializes the Keypoints object with detection keypoints and original image size."""
if keypoints.ndim == 2:
keypoints = keypoints[None, :]
if keypoints.shape[2] == 3: # x, y, conf
mask = keypoints[..., 2] < 0.5 # points with conf < 0.5 (not visible)
keypoints[..., :2][mask] = 0
super().__init__(keypoints, orig_shape)
self.has_visible = self.data.shape[-1] == 3
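A hedged sketch of the Keypoints accessors, using a placeholder pose checkpoint:

from ultralytics import YOLO

kpts = YOLO("yolov8n-pose.pt")("bus.jpg")[0].keypoints
if kpts is not None:
    print(kpts.xy.shape)   # (N, K, 2) keypoint coordinates in pixels
    print(kpts.xyn.shape)  # (N, K, 2) normalized coordinates
    print(kpts.conf)       # (N, K) visibility confidences, or None without a conf channel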
@@ -566,6 +627,7 @@ class Probs(BaseTensor):
"""
def __init__(self, probs, orig_shape=None) -> None:
"""Initialize the Probs class with classification probabilities and optional original shape of the image."""
super().__init__(probs, orig_shape)
@property
@@ -591,3 +653,91 @@ class Probs(BaseTensor):
def top5conf(self):
"""Return the confidences of top 5."""
return self.data[self.top5]
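A hedged sketch of the Probs accessors, using a placeholder classification checkpoint:

from ultralytics import YOLO

probs = YOLO("yolov8n-cls.pt")("bus.jpg")[0].probs
print(probs.top1, probs.top1conf)  # best class index and its confidence
print(probs.top5, probs.top5conf)  # top-5 class indices and their confidences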
class OBB(BaseTensor):
"""
A class for storing and manipulating Oriented Bounding Boxes (OBB).
Args:
boxes (torch.Tensor | numpy.ndarray): A tensor or numpy array containing the detection boxes,
with shape (num_boxes, 7) or (num_boxes, 8). The last two columns contain confidence and class values.
If present, the third last column contains track IDs, and the fifth column from the left contains rotation.
orig_shape (tuple): Original image size, in the format (height, width).
Attributes:
xywhr (torch.Tensor | numpy.ndarray): The boxes in [x_center, y_center, width, height, rotation] format.
conf (torch.Tensor | numpy.ndarray): The confidence values of the boxes.
cls (torch.Tensor | numpy.ndarray): The class values of the boxes.
id (torch.Tensor | numpy.ndarray): The track IDs of the boxes (if available).
xyxyxyxyn (torch.Tensor | numpy.ndarray): The rotated boxes in xyxyxyxy format normalized by orig image size.
xyxyxyxy (torch.Tensor | numpy.ndarray): The rotated boxes in xyxyxyxy format.
xyxy (torch.Tensor | numpy.ndarray): The horizontal boxes in xyxyxyxy format.
data (torch.Tensor): The raw OBB tensor (alias for `boxes`).
Methods:
cpu(): Move the object to CPU memory.
numpy(): Convert the object to a numpy array.
cuda(): Move the object to CUDA memory.
to(*args, **kwargs): Move the object to the specified device.
"""
def __init__(self, boxes, orig_shape) -> None:
"""Initialize the Boxes class."""
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
assert n in (7, 8), f"expected 7 or 8 values but got {n}" # xywh, rotation, track_id, conf, cls
super().__init__(boxes, orig_shape)
self.is_track = n == 8
self.orig_shape = orig_shape
@property
def xywhr(self):
"""Return the rotated boxes in xywhr format."""
return self.data[:, :5]
@property
def conf(self):
"""Return the confidence values of the boxes."""
return self.data[:, -2]
@property
def cls(self):
"""Return the class values of the boxes."""
return self.data[:, -1]
@property
def id(self):
"""Return the track IDs of the boxes (if available)."""
return self.data[:, -3] if self.is_track else None
@property
@lru_cache(maxsize=2)
def xyxyxyxy(self):
"""Return the boxes in xyxyxyxy format, (N, 4, 2)."""
return ops.xywhr2xyxyxyxy(self.xywhr)
@property
@lru_cache(maxsize=2)
def xyxyxyxyn(self):
"""Return the boxes in xyxyxyxy format, (N, 4, 2)."""
xyxyxyxyn = self.xyxyxyxy.clone() if isinstance(self.xyxyxyxy, torch.Tensor) else np.copy(self.xyxyxyxy)
xyxyxyxyn[..., 0] /= self.orig_shape[1]
xyxyxyxyn[..., 1] /= self.orig_shape[0]
return xyxyxyxyn
@property
@lru_cache(maxsize=2)
def xyxy(self):
"""
Return the horizontal boxes in xyxy format, (N, 4).
Accepts both torch and numpy boxes.
"""
x1 = self.xyxyxyxy[..., 0].min(1).values
x2 = self.xyxyxyxy[..., 0].max(1).values
y1 = self.xyxyxyxy[..., 1].min(1).values
y2 = self.xyxyxyxy[..., 1].max(1).values
xyxy = [x1, y1, x2, y2]
return np.stack(xyxy, axis=-1) if isinstance(self.data, np.ndarray) else torch.stack(xyxy, dim=-1)
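A hedged sketch of the new OBB accessors, using a placeholder oriented-box checkpoint and image:

from ultralytics import YOLO

obb = YOLO("yolov8n-obb.pt")("boats.jpg")[0].obb
if obb is not None:
    print(obb.xywhr)     # (N, 5) center x/y, width, height, rotation angle
    print(obb.xyxyxyxy)  # (N, 4, 2) rotated box corners in pixels
    print(obb.xyxy)      # (N, 4) axis-aligned boxes enclosing the rotated ones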