# (removed pasted file-listing metadata — "224 lines / 5.9 KiB / Python" — which is not valid Python)
# -*- coding: utf-8 -*-
|
|
"""
|
|
Created on Thu May 30 14:03:03 2024
|
|
|
|
@author: ym
|
|
"""
|
|
import os
|
|
import cv2
|
|
import numpy as np
|
|
from pathlib import Path
|
|
import sys
|
|
sys.path.append(r"D:\DetectTracking")
|
|
|
|
|
|
from tracking.utils.plotting import Annotator, colors
|
|
from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load
|
|
from tracking.trackers import BOTSORT, BYTETracker
|
|
from tracking.dotrack.dotracks_back import doBackTracks
|
|
from tracking.dotrack.dotracks_front import doFrontTracks
|
|
from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
|
|
|
|
W, H = 1024, 1280  # frame width/height — not referenced in this part of the file; presumably the camera resolution (TODO confirm)

Mode = 'front' #'back'
# Mode selects the analysis branch in main(): "front" -> doFrontTracks, anything else -> doBackTracks.
|
|
|
|
def read_data_file(datapath):
    """Parse a tracker ``.data`` log into per-video frame boxes and features.

    The log interleaves three kinds of lines:
      * ``CameraId`` lines carrying a timestamp after the first ``:`` of the
        second comma-field — each one marks the start of a new frame; a gap
        of more than 1e4 between consecutive timestamps starts a new video
        segment.
      * ``box:v1,v2,...,``  — one detection box of the current frame.
      * ``feat:v1,v2,...,`` — one feature vector of the current frame.

    Args:
        datapath: path to the ``.data`` log file.

    Returns:
        list of ``(FrameBoxes, FrameFeats)`` tuples, one per video segment,
        where each item is a list of per-frame ``np.float32`` arrays.
    """
    with open(datapath, 'r') as file:
        lines = file.readlines()

    Videos = []
    FrameBoxes, FrameFeats = [], []
    boxes, feats = [], []
    timestamp = []
    t1 = None
    for line in lines:
        if line.find('CameraId') >= 0:
            t = int(line.split(',')[1].split(':')[1])
            timestamp.append(t)

            # A new frame begins: flush the previous frame's accumulators.
            if len(boxes) and len(feats):
                FrameBoxes.append(np.array(boxes, dtype=np.float32))
                FrameFeats.append(np.array(feats, dtype=np.float32))
                boxes, feats = [], []

            # A large timestamp jump separates two recordings.
            if t1 and t - t1 > 1e4:
                Videos.append((FrameBoxes, FrameFeats))
                FrameBoxes, FrameFeats = [], []

            t1 = t  # reuse the already-parsed timestamp instead of re-splitting the line

        if line.find('box') >= 0:
            # values are comma-separated with a trailing comma -> drop last field
            box = line.split(':', )[1].split(',')[:-1]
            boxes.append(box)

        if line.find('feat') >= 0:
            feat = line.split(':', )[1].split(',')[:-1]
            feats.append(feat)

    # Flush the trailing frame and segment. Guard against an empty tail,
    # which would otherwise be appended as a zero-size array.
    if len(boxes) and len(feats):
        FrameBoxes.append(np.array(boxes, dtype=np.float32))
        FrameFeats.append(np.array(feats, dtype=np.float32))
    Videos.append((FrameBoxes, FrameFeats))

    return Videos
|
|
|
|
def video2imgs(path):
    """Extract every frame of each video under ``path/videos`` into PNGs.

    For a video file ``<name>.<ext>``, frames are written to ``path/<name>/``
    as ``<name>_<i>.png`` with a 1-based frame index. A video whose output
    directory already exists is skipped (assumed already extracted). At most
    1000 videos are processed per call.

    Args:
        path: directory that contains a ``videos`` sub-directory.
    """
    vpath = os.path.join(path, "videos")

    k = 0
    for filename in os.listdir(vpath):
        file, ext = os.path.splitext(filename)
        imgdir = os.path.join(path, file)
        if os.path.exists(imgdir):
            continue  # already extracted
        os.mkdir(imgdir)

        vfile = os.path.join(vpath, filename)
        cap = cv2.VideoCapture(vfile)
        try:
            i = 0
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                i += 1
                imgp = os.path.join(imgdir, file + f"_{i}.png")
                cv2.imwrite(imgp, frame)
                print(filename + f": {i}")
        finally:
            # release the capture even if imwrite/read raises (original leaked it)
            cap.release()

        k += 1
        if k == 1000:
            break
|
|
|
|
def draw_boxes():
    """Overlay logged detection boxes on each frame of a test video.

    Reads per-frame boxes from the first video segment of a tracker ``.data``
    log, draws them on the matching frames of a hard-coded video file and
    writes each annotated frame as ``<i>.png``.
    """
    datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
    VideosData = read_data_file(datapath)

    bboxes = VideosData[0][0]
    ffeats = VideosData[0][1]

    videopath = r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4"

    cap = cv2.VideoCapture(videopath)
    i = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # Bug fix: the video may contain more frames than the log recorded;
        # without this guard bboxes[i] raises IndexError.
        if i >= len(bboxes):
            break

        annotator = Annotator(frame.copy(), line_width=3)

        # each row is assumed to be (x1, y1, x2, y2, conf, cls) — matches read_data_file output
        boxes = bboxes[i]
        for *xyxy, conf, cls in reversed(boxes):
            label = f'{int(cls)}: {conf:.2f}'
            color = colors(int(cls), True)
            annotator.box_label(xyxy, label, color=color)

        img = annotator.result()
        imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1)
        cv2.imwrite(imgpath, img)

        print(f"Output: {i}")
        i += 1
    cap.release()
|
|
|
|
def init_tracker(tracker_yaml = None, bs=1):
    """
    Build a tracker instance from a YAML configuration file.

    Args:
        tracker_yaml: path to the tracker configuration file; its
            ``tracker_type`` field selects the implementation.
        bs: batch size (kept for interface compatibility; unused here).

    Returns:
        A ``BYTETracker`` or ``BOTSORT`` instance running at 30 fps.
    """
    cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
    tracker_classes = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
    tracker_cls = tracker_classes[cfg.tracker_type]
    return tracker_cls(args=cfg, frame_rate=30)
|
|
|
|
def tracking(bboxes, ffeats):
    """Run the configured tracker over per-frame detections and features.

    Args:
        bboxes: iterable of per-frame detection arrays.
        ffeats: iterable of per-frame appearance features, parallel to
            ``bboxes``.

    Returns:
        tuple ``(track_boxes, features_dict)`` where ``track_boxes`` is an
        ``(N, 9)`` float32 array accumulating the tracker output of every
        frame, and ``features_dict`` maps ``frame_id -> {track idx: feature}``
        for activated tracks.
    """
    tracker_yaml = r"./trackers/cfg/botsort.yaml"
    tracker = init_tracker(tracker_yaml)

    track_boxes = np.empty((0, 9), dtype=np.float32)
    features_dict = {}

    '''==================== run the tracker ======================='''
    for dets, feats in zip(bboxes, ffeats):
        # NOTE(review): frames may need re-sorting by frame_id first
        # (carried over from the original comment) — confirm with caller.
        det_tracking = Boxes(dets).cpu().numpy()
        tracks = tracker.update(det_tracking, feats)

        if len(tracks):
            track_boxes = np.concatenate([track_boxes, tracks], axis=0)
            feat_dict = {int(x.idx): x.curr_feat for x in tracker.tracked_stracks if x.is_activated}
            # column 7 of the tracker output holds the frame id
            frame_id = tracks[0, 7]
            features_dict.update({int(frame_id): feat_dict})

    # Bug fix: return the accumulated track boxes, not ``det_tracking``
    # (which was only the LAST frame's raw detections and is unbound when
    # the input is empty). main() feeds this result to doFrontTracks /
    # doBackTracks, which expect the full track array.
    return track_boxes, features_dict
|
|
|
|
|
|
|
|
def main(save_dir=None, filename='traj.png'):
    """Track logged detections and visualize the resulting trajectories.

    Reads the first video segment from a hard-coded ``.data`` log, runs the
    tracker, then either plots the frame-id/y2 curve (``Mode == "front"``)
    or draws all trajectories over the cart edge-line image (back mode).

    Args:
        save_dir: output directory for the back-mode trajectory image;
            defaults to ``./result`` and is created if missing. (Bug fix:
            the original read module globals defined only under the
            ``__main__`` guard, so importing and calling ``main()`` raised
            NameError.)
        filename: output image name for the back-mode plot.
    """
    if save_dir is None:
        save_dir = Path('./result')
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)

    datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
    VideosData = read_data_file(datapath)

    bboxes = VideosData[0][0]
    ffeats = VideosData[0][1]

    bboxes, feats_dict = tracking(bboxes, ffeats)

    if Mode == "front":
        vts = doFrontTracks(bboxes, feats_dict)
        vts.classify()

        plt = plot_frameID_y2(vts)
        plt.savefig('front_y2.png')
        # plt.close()
    else:
        vts = doBackTracks(bboxes, feats_dict)
        vts.classify()

        edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
        draw_all_trajectories(vts, edgeline, save_dir, filename)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # Output location for the back-mode trajectory plot drawn by main().
    filename = 'traj.png'
    save_dir = Path('./result')
    # exist_ok makes the explicit exists() check redundant — same effect.
    save_dir.mkdir(parents=True, exist_ok=True)

    main()
|
|
|
|
|