modified for site test

王庆刚
2024-07-18 17:52:12 +08:00
parent f90ef72cbf
commit e986ec060b
39 changed files with 2279 additions and 375 deletions

Binary file not shown.

View File

@ -0,0 +1,365 @@
import os.path
import shutil
import numpy as np
import matplotlib.pyplot as plt
import cv2
from utils.plotting import Annotator, colors
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output
from tracking.utils.plotting import draw_tracking_boxes
def showHist(err, correct):
err = np.array(err)
correct = np.array(correct)
fig, axs = plt.subplots(2, 1)
axs[0].hist(err, bins=50, edgecolor='black')
axs[0].set_xlim([0, 1])
axs[0].set_title('err')
axs[1].hist(correct, bins=50, edgecolor='black')
axs[1].set_xlim([0, 1])
axs[1].set_title('correct')
plt.show()
def showgrid(recall, prec, ths):
# x = np.linspace(start=-0, stop=1, num=11, endpoint=True).tolist()
fig = plt.figure(figsize=(10, 6))
plt.plot(ths, recall, color='red', label='recall')
plt.plot(ths, prec, color='blue', label='PrecisePos')
plt.legend()
plt.xlabel('threshold')
# plt.ylabel('Similarity')
plt.grid(True, linestyle='--', alpha=0.5)
plt.savefig('accuracy_recall_grid.png')
plt.show()
# plt.close()
def compute_recall_precision(err_similarity, correct_similarity):
ths = np.linspace(0, 1, 11)
recall, prec = [], []
for th in ths:
TP = len([num for num in correct_similarity if num >= th])
FP = len([num for num in err_similarity if num >= th])
if (TP+FP) == 0:
prec.append(1)
recall.append(0)
else:
prec.append(TP / (TP + FP))
recall.append(TP / (len(err_similarity) + len(correct_similarity)))
showgrid(recall, prec, ths)
return recall, prec
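# Usage sketch (illustrative numbers only): feed the per-event top-1 similarities of
# wrongly matched and correctly matched events into the curve computation above.
def _demo_recall_precision():
    err_sim = [0.31, 0.44, 0.52, 0.67]            # hypothetical similarities of wrong matches
    correct_sim = [0.58, 0.73, 0.81, 0.90, 0.95]  # hypothetical similarities of correct matches
    recall, prec = compute_recall_precision(err_sim, correct_sim)
    for th, r, p in zip(np.linspace(0, 1, 11), recall, prec):
        print(f"th={th:.1f}  recall={r:.2f}  precision={p:.2f}")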
# =============================================================================
# def read_tracking_output(filepath):
# boxes = []
# feats = []
# with open(filepath, 'r', encoding='utf-8') as file:
# for line in file:
# line = line.strip() # strip the trailing newline and any whitespace
#
# if not line:
# continue
#
# if line.endswith(','):
# line = line[:-1]
#
# data = np.array([float(x) for x in line.split(",")])
# if data.size == 9:
# boxes.append(data)
# if data.size == 256:
# feats.append(data)
#
# return np.array(boxes), np.array(feats)
# =============================================================================
def read_tracking_imgs(imgspath):
'''
input:
imgspath: folder whose images are the YOLO input frames (640x512)
output:
imgs_0: rear-camera images, sorted by frameId
imgs_1: front-camera images, sorted by frameId
'''
imgs_0, frmIDs_0, imgs_1, frmIDs_1 = [], [], [], []
for filename in os.listdir(imgspath):
file, ext = os.path.splitext(filename)
flist = file.split('_')
if len(flist)==4 and ext==".jpg":
camID, frmID = flist[0], int(flist[-1])
imgpath = os.path.join(imgspath, filename)
img = cv2.imread(imgpath)
if camID=='0':
imgs_0.append(img)
frmIDs_0.append(frmID)
if camID=='1':
imgs_1.append(img)
frmIDs_1.append(frmID)
if len(frmIDs_0):
indice = np.argsort(np.array(frmIDs_0))
imgs_0 = [imgs_0[i] for i in indice ]
if len(frmIDs_1):
indice = np.argsort(np.array(frmIDs_1))
imgs_1 = [imgs_1[i] for i in indice ]
return imgs_0, imgs_1
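# Usage sketch (the event-folder path is a placeholder): the folder is expected to hold
# frames named "<camID>_..._<frameID>.jpg", which is the layout parsed above.
def _demo_read_tracking_imgs(eventdir=r"D:\contrast\dataset\1_to_n\709\20240709-102758_6971558612189"):
    imgs_back, imgs_front = read_tracking_imgs(eventdir)
    print(f"rear-camera frames: {len(imgs_back)}, front-camera frames: {len(imgs_front)}")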
# =============================================================================
# def draw_tracking_boxes(imgs, tracks):
# '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# 0 1 2 3 4 5 6 7 8
# Key point: the order of imgs corresponds to the fid field in tracks
# '''
# subimgs = []
# for *xyxy, tid, conf, cls, fid, bid in tracks:
# label = f'id:{int(tid)}_{int(cls)}_{conf:.2f}'
#
# annotator = Annotator(imgs[int(fid-1)].copy())
# if cls==0:
# color = colors(int(cls), True)
# elif tid>0 and cls!=0:
# color = colors(int(tid), True)
# else:
# color = colors(19, True) # 19 is the last entry of the color palette
#
# pt2 = [p/2 for p in xyxy]
# annotator.box_label(pt2, label, color=color)
# img0 = annotator.result()
#
# subimgs.append(img0)
#
# return subimgs
# =============================================================================
def get_contrast_paths(pair, basepath):
assert(len(pair)==2 or len(pair)==3), "pair: seqdir, delete, barcodes"
getout_fold = pair[0] # folder of the take-out event
relvt_barcode = pair[1] # barcode of the put-in event that this take-out should match
if len(pair)==3:
error_match = pair[2] # barcode that the take-out event was wrongly matched to
else:
error_match = ''
getoutpath, inputpath, errorpath = '', '', ''
day, hms = getout_fold.strip('_').split('-')
input_folds, times = [], []
errmatch_folds, errmatch_times = [], []
for pathname in os.listdir(basepath):
if pathname.endswith('_'): continue
if os.path.isfile(os.path.join(basepath, pathname)):continue
infold = pathname.split('_')
if len(infold)!=2: continue
day1, hms1 = infold[0].split('-')
if day1==day and infold[1]==relvt_barcode and int(hms1)<int(hms):
input_folds.append(pathname)
times.append(int(hms1))
if day1==day and len(error_match) and infold[1]==error_match and int(hms1)<int(hms):
errmatch_folds.append(pathname)
errmatch_times.append(int(hms1))
''' Sort by time and pick the folder closest in time (and prior) to the take-out event
as the put-in event that the take-out should correctly match '''
if len(input_folds):
indice = np.argsort(np.array(times))
input_fold = input_folds[indice[-1]]
inputpath = os.path.join(basepath, input_fold)
'''Folder of the put-in event that the take-out was wrongly matched to'''
if len(errmatch_folds):
indice = np.argsort(np.array(errmatch_times))
errmatch_fold = errmatch_folds[indice[-1]]
errorpath = os.path.join(basepath, errmatch_fold)
'''Paths of the put-in and take-out event folders'''
getoutpath = os.path.join(basepath, getout_fold)
return getoutpath, inputpath, errorpath
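# Usage sketch (folder names are placeholders): event folders follow the
# "<day>-<hms>_<barcode>" pattern assumed above; a take-out folder name ends with "_".
def _demo_get_contrast_paths(basepath=r"D:\contrast\dataset\1_to_n\709"):
    pair = ("20240709-102843_", "6971558612189", "6958770005357")  # (take-out folder, expected barcode, wrongly matched barcode)
    getoutpath, inputpath, errorpath = get_contrast_paths(pair, basepath)
    print(getoutpath, inputpath, errorpath)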
def save_tracking_imgpairs(pair, basepath, savepath):
'''
basepath: path of the raw test-data folder
savepath: target folder for the saved image pairs
'''
getoutpath, inputpath, errorpath = get_contrast_paths(pair, basepath)
if len(inputpath)==0:
return
'''==== Read the rear/front YOLO input images for the put-in and take-out events (0: rear camera, 1: front camera) ===='''
'''==== Read the tracking outputs (boxes, feats) for the put-in and take-out events ===='''
if len(inputpath):
imgs_input_0, imgs_input_1 = read_tracking_imgs(inputpath)
input_data_0 = os.path.join(inputpath, '0_tracking_output.data')
input_data_1 = os.path.join(inputpath, '1_tracking_output.data')
boxes_input_0, feats_input_0 = read_tracking_output(input_data_0)
boxes_input_1, feats_input_1 = read_tracking_output(input_data_1)
ImgsInput_0 = draw_tracking_boxes(imgs_input_0, boxes_input_0)
ImgsInput_1 = draw_tracking_boxes(imgs_input_1, boxes_input_1)
if len(getoutpath):
imgs_getout_0, imgs_getout_1 = read_tracking_imgs(getoutpath)
getout_data_0 = os.path.join(getoutpath, '0_tracking_output.data')
getout_data_1 = os.path.join(getoutpath, '1_tracking_output.data')
boxes_output_0, feats_output_0 = read_tracking_output(getout_data_0)
boxes_output_1, feats_output_1 = read_tracking_output(getout_data_1)
ImgsGetout_0 = draw_tracking_boxes(imgs_getout_0, boxes_output_0)
ImgsGetout_1 = draw_tracking_boxes(imgs_getout_1, boxes_output_1)
if len(errorpath):
imgs_error_0, imgs_error_1 = read_tracking_imgs(errorpath)
error_data_0 = os.path.join(errorpath, '0_tracking_output.data')
error_data_1 = os.path.join(errorpath, '1_tracking_output.data')
boxes_error_0, feats_error_0 = read_tracking_output(error_data_0)
boxes_error_1, feats_error_1 = read_tracking_output(error_data_1)
ImgsError_0 = draw_tracking_boxes(imgs_error_0, boxes_error_0)
ImgsError_1 = draw_tracking_boxes(imgs_error_1, boxes_error_1)
savedir = pair[0] + pair[1]
if len(errorpath):
savedir = savedir + '_' + errorpath.split('_')[-1]
foldname = os.path.join(savepath, 'imgpairs', savedir)
if not os.path.exists(foldname):
os.makedirs(foldname)
for i, img in enumerate(ImgsInput_0):
imgpath = os.path.join(foldname, f'input_0_{i}.png')
cv2.imwrite(imgpath, img)
for i, img in enumerate(ImgsInput_1):
imgpath = os.path.join(foldname, f'input_1_{i}.png')
cv2.imwrite(imgpath, img)
for i, img in enumerate(ImgsGetout_0):
imgpath = os.path.join(foldname, f'getout_0_{i}.png')
cv2.imwrite(imgpath, img)
for i, img in enumerate(ImgsGetout_1):
imgpath = os.path.join(foldname, f'getout_1_{i}.png')
cv2.imwrite(imgpath, img)
for i, img in enumerate(ImgsError_0):
imgpath = os.path.join(foldname, f'errMatch_0_{i}.png')
cv2.imwrite(imgpath, img)
for i, img in enumerate(ImgsError_1):
imgpath = os.path.join(foldname, f'errMatch_1_{i}.png')
cv2.imwrite(imgpath, img)
def performance_evaluate(all_list, isshow=False):
corrpairs, correct_barcode_list, correct_similarity, errpairs, err_barcode_list, err_similarity = [], [], [], [], [], []
for s_list in all_list:
seqdir = s_list['SeqDir'].strip()
delete = s_list['Deleted'].strip()
barcodes = [s.strip() for s in s_list['barcode']]
similarity = [float(s.strip()) for s in s_list['similarity']]
if delete in barcodes[:1]:
corrpairs.append((seqdir, delete))
correct_barcode_list.append(delete)
correct_similarity.append(similarity[0])
else:
errpairs.append((seqdir, delete, barcodes[0]))
err_barcode_list.append(delete)
err_similarity.append(similarity[0])
'''3. Compute matching performance '''
if isshow:
compute_recall_precision(err_similarity, correct_similarity)
showHist(err_similarity, correct_similarity)
return errpairs, corrpairs, err_similarity, correct_similarity
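# Usage sketch (the file path is a placeholder): evaluate one deletedBarcode result file
# and report the share of take-out events whose top-1 barcode match was wrong.
def _demo_performance_evaluate(fpath=r"D:\contrast\dataset\compairsonResult\deletedBarcode_20240709_pm.txt"):
    all_list = read_deletedBarcode_file(fpath)
    errpairs, corrpairs, err_sim, correct_sim = performance_evaluate(all_list, isshow=False)
    total = len(errpairs) + len(corrpairs)
    if total:
        print(f"events: {total}, top-1 errors: {len(errpairs)} ({len(errpairs)/total:.1%})")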
def contrast_analysis(del_barcode_file, basepath, savepath, saveimgs=False):
'''
del_barcode_file: test-data file used for the algorithm performance analysis
'''
'''1. Read the deletedBarcode file '''
all_list = read_deletedBarcode_file(del_barcode_file)
'''2. Evaluate algorithm performance and output (take-out, deleted, wrong-match) tuples '''
errpairs, corrpairs, _, _ = performance_evaluate(all_list)
'''3. Get the paths for each (take-out, deleted, wrong-match) tuple and save the corresponding trajectory images'''
relative_paths = []
for errpair in errpairs:
GetoutPath, InputPath, ErrorPath = get_contrast_paths(errpair, basepath)
relative_paths.append((GetoutPath, InputPath, ErrorPath))
if saveimgs:
save_tracking_imgpairs(errpair, basepath, savepath)
return relative_paths
def main():
del_barcode_file = 'D:/contrast/dataset/compairsonResult/deletedBarcode_20240709_pm.txt'
basepath = r'D:\contrast\dataset\1_to_n\709'
savepath = r'D:\contrast\dataset\result'
try:
relative_path = contrast_analysis(del_barcode_file, basepath, savepath)
except Exception as e:
print(f'Error Type: {e}')
if __name__ == '__main__':
main()

View File

@ -0,0 +1 @@
The data under the folders trackdicts_20240608 and trackdicts_1 are tracking results from before hand association.

View File

@ -94,6 +94,7 @@ class Track:
self.cls = int(boxes[0, 6])
self.frnum = boxes.shape[0]
self.imgBorder = False
self.isCornpoint = False
self.imgshape = imgshape
self.state = MoveState.Unknown
@ -101,9 +102,13 @@ class Track:
self.start_fid = int(np.min(boxes[:, 7]))
self.end_fid = int(np.max(boxes[:, 7]))
''''''
self.Hands = []
self.HandsIou = []
self.Goods = []
self.GoodsIou = []
'''Coordinates of the 5 key points (center, top-left, top-right, bottom-left, bottom-right)'''
@ -113,7 +118,7 @@ class Track:
(center, top-left, top-right, bottom-left, bottom-right) trajectory features'''
self.compute_cornpts_feats()
'''Should compute the area at each corner point and the average area'''
mw, mh = np.mean(boxes[:, 2]-boxes[:, 0]), np.mean((boxes[:, 3]-boxes[:, 1]))
self.mwh = np.mean((mw, mh))
self.Area = mw * mh

View File

@ -55,6 +55,7 @@ class doBackTracks(doTracks):
# tracks = self.sub_tracks(tracks, out_trcak)
[self.associate_with_hand(htrack, gtrack) for htrack in hand_tracks for gtrack in tracks]
'''Iterative track merging'''
# merged_tracks = self.merge_tracks(tracks)
merged_tracks = self.merge_tracks_loop(tracks)
@ -66,17 +67,28 @@ class doBackTracks(doTracks):
self.Static.extend(static_tracks)
tracks = self.sub_tracks(tracks, static_tracks)
for gtrack in tracks:
# print(f"Goods ID:{gtrack.tid}")
for htrack in hand_tracks:
hand_ious = self.associate_with_hand(htrack, gtrack)
if len(hand_ious):
gtrack.Hands.append(htrack)
gtrack.HandsIou.append(hand_ious)
self.Residual = tracks
# for gtrack in tracks:
# for htrack in hand_tracks:
# hand_ious = self.associate_with_hand(htrack, gtrack)
# if len(hand_ious):
# gtrack.Hands.append(htrack)
# gtrack.HandsIou.append(hand_ious)
# htrack.Goods.append((gtrack, hand_ious))
# for htrack in hand_tracks:
# self.merge_based_hands(htrack)
self.Residual = tracks
# def merge_based_hands(self, htrack):
# gtracks = htrack.Goods
# if len(gtracks) >= 2:
# atrack, afious = gtracks[0]
# btrack, bfious = gtracks[1]
def associate_with_hand(self, htrack, gtrack):
'''
To be moved to the base class:
@ -91,6 +103,7 @@ class doBackTracks(doTracks):
hboxes = np.empty(shape=(0, 9), dtype = np.float)
gboxes = np.empty(shape=(0, 9), dtype = np.float)
# start, end are index values; slicing requires start:(end+1)
for start, end in htrack.moving_index:
@ -99,18 +112,17 @@ class doBackTracks(doTracks):
gboxes = np.concatenate((gboxes, gtrack.boxes[start:end+1, :]), axis=0)
hfids, gfids = hboxes[:, 7], gboxes[:, 7]
fids = set(hfids).intersection(set(gfids))
fids = sorted(set(hfids).intersection(set(gfids)))
if len(fids)==0:
return hand_ious
return None
# print(f"Goods ID: {gtrack.tid}, Hand ID: {htrack.tid}")
for f in fids:
h = np.where(hfids==f)[0][0]
g = np.where(gfids==f)[0][0]
h = np.where(hboxes[:,7] == f)[0][0]
g = np.where(gboxes[:,7] == f)[0][0]
x11, y11, x12, y12 = hboxes[h, 0:4]
x21, y21, x22, y22 = gboxes[g, 0:4]
@ -124,10 +136,11 @@ class doBackTracks(doTracks):
iou = union / (area1 + area2 - union + 1e-6)
if iou>0:
hand_ious.append((f, iou))
if iou >= 0.01:
gtrack.Hands.append((htrack.tid, f, iou))
return hand_ious
return gtrack.Hands
def merge_tracks(self, Residual):
"""

View File

@ -44,21 +44,25 @@ class doFrontTracks(doTracks):
'''Tracks remaining after removing static targets'''
tracks = self.sub_tracks(tracks, static_tracks)
[self.associate_with_hand(htrack, gtrack) for htrack in hand_tracks for gtrack in tracks]
'''Iterative track merging'''
merged_tracks = self.merge_tracks_loop(tracks)
tracks = [t for t in merged_tracks if t.frnum > 1]
for gtrack in tracks:
# print(f"Goods ID:{gtrack.tid}")
for htrack in hand_tracks:
hand_ious = self.associate_with_hand(htrack, gtrack)
if len(hand_ious):
gtrack.Hands.append(htrack)
gtrack.HandsIou.append(hand_ious)
# for gtrack in tracks:
# # print(f"Goods ID:{gtrack.tid}")
# for htrack in hand_tracks:
# hand_ious = self.associate_with_hand(htrack, gtrack)
# if len(hand_ious):
# gtrack.Hands.append(htrack)
# gtrack.HandsIou.append(hand_ious)
'''Detect and remove static tracks'''
static_tracks = [t for t in tracks if t.frnum>1 and t.is_static()]
tracks = self.sub_tracks(tracks, static_tracks)
freemoved_tracks = [t for t in tracks if t.is_free_move()]
tracks = self.sub_tracks(tracks, freemoved_tracks)
@ -73,10 +77,8 @@ class doFrontTracks(doTracks):
a. the frame indices of the moving frames intersect
b. the IoU is greater than 0 on the intersecting frames
'''
assert htrack.cls==0 and gtrack.cls!=0 and gtrack.cls!=9, 'Track cls is Error!'
hand_ious = []
hboxes = np.empty(shape=(0, 9), dtype = np.float)
gboxes = np.empty(shape=(0, 9), dtype = np.float)
@ -87,14 +89,12 @@ class doFrontTracks(doTracks):
gboxes = np.concatenate((gboxes, gtrack.boxes[start:end+1, :]), axis=0)
hfids, gfids = hboxes[:, 7], gboxes[:, 7]
fids = set(hfids).intersection(set(gfids))
fids = sorted(set(hfids).intersection(set(gfids)))
if len(fids)==0:
return hand_ious
return None
# print(f"Goods ID: {gtrack.tid}, Hand ID: {htrack.tid}")
ious = []
for f in fids:
h = np.where(hfids==f)[0][0]
g = np.where(gfids==f)[0][0]
@ -111,10 +111,10 @@ class doFrontTracks(doTracks):
iou = union / (area1 + area2 - union + 1e-6)
if iou>0:
hand_ious.append((f, iou))
return hand_ious
if iou >= 0.01:
gtrack.Hands.append((htrack.tid, f, iou))
return gtrack.Hands

View File

@ -30,6 +30,26 @@ from utils.drawtracks import plot_frameID_y2, draw_all_trajectories
from utils.mergetrack import readDict
import csv
def read_csv_file():
file_path = r'D:\DeepLearning\yolov5_track\tracking\matching\featdata\Similarity.csv'
with open(file_path, mode='r', newline='') as file:
data = list(csv.reader(file))
matrix = []
for i in range(1, len(data)):
matrix.append(data[i][1:])
matrix = np.array(matrix, dtype = np.float32)
simil = 1 + (matrix-1)/2
print("done!!!")
def get_img_filename(imgpath = r'./matching/images/' ):
@ -747,7 +767,7 @@ def main():
# imgsample_cleaning()
'''3.1 Compute inter-event similarity: merge all track features from the front and back cameras'''
# calculate_similarity()
calculate_similarity()
'''3.2 Compute inter-event similarity: consider different front/back camera combinations, or different track combinations'''
# calculate_similarity_track()
@ -766,8 +786,29 @@ def main():
if __name__ == "__main__":
save_dir = Path(f'./result/')
# save_dir = Path(f'./result/')
# read_csv_file()
main()

tracking/module_analysis.py Normal file
View File

@ -0,0 +1,390 @@
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 14:03:03 2024
On-site test performance analysis
@author: ym
"""
import os
import cv2
import numpy as np
from pathlib import Path
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.plotting import Annotator, colors, draw_tracking_boxes
from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load
from tracking.trackers import BOTSORT, BYTETracker
from tracking.dotrack.dotracks_back import doBackTracks
from tracking.dotrack.dotracks_front import doFrontTracks
from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output
from contrast_analysis import contrast_analysis
from tracking.utils.annotator import TrackAnnotator
W, H = 1024, 1280
Mode = 'front' #'back'
ImgFormat = ['.jpg', '.jpeg', '.png', '.bmp']
def video2imgs(path):
vpath = os.path.join(path, "videos")
k = 0
have = False
for filename in os.listdir(vpath):
file, ext = os.path.splitext(filename)
imgdir = os.path.join(path, file)
if os.path.exists(imgdir):
continue
else:
os.mkdir(imgdir)
vfile = os.path.join(vpath, filename)
cap = cv2.VideoCapture(vfile)
i = 0
while True:
ret, frame = cap.read()
if not ret:
break
i += 1
imgp = os.path.join(imgdir, file+f"_{i}.png")
cv2.imwrite(imgp, frame)
print(filename+f": {i}")
cap.release()
k+=1
if k==1000:
break
def draw_boxes():
datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
VideosData = read_tracker_input(datapath)
bboxes = VideosData[0][0]
ffeats = VideosData[0][1]
videopath = r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4"
cap = cv2.VideoCapture(videopath)
i = 0
while True:
ret, frame = cap.read()
if not ret:
break
annotator = Annotator(frame.copy(), line_width=3)
boxes = bboxes[i]
for *xyxy, conf, cls in reversed(boxes):
label = f'{int(cls)}: {conf:.2f}'
color = colors(int(cls), True)
annotator.box_label(xyxy, label, color=color)
img = annotator.result()
imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1)
cv2.imwrite(imgpath, img)
print(f"Output: {i}")
i += 1
cap.release()
def read_imgs(imgspath, CamerType):
imgs, frmIDs = [], []
for filename in os.listdir(imgspath):
file, ext = os.path.splitext(filename)
flist = file.split('_')
if len(flist)==4 and ext in ImgFormat:
camID, frmID = flist[0], int(flist[-1])
imgpath = os.path.join(imgspath, filename)
img = cv2.imread(imgpath)
if camID==CamerType:
imgs.append(img)
frmIDs.append(frmID)
if len(frmIDs):
indice = np.argsort(np.array(frmIDs))
imgs = [imgs[i] for i in indice]
return imgs
pass
def init_tracker(tracker_yaml = None, bs=1):
"""
Initialize tracker for object tracking during prediction.
"""
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
return tracker
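# Usage sketch: the YAML passed to init_tracker() must at least define 'tracker_type'
# ('botsort' or 'bytetrack'); the repo's default config path below is used as-is, and
# other fields (e.g. with_reid, new_track_thresh) are consumed by the trackers themselves.
def _demo_init_tracker(tracker_yaml=r"./trackers/cfg/botsort.yaml"):
    tracker = init_tracker(tracker_yaml)
    print(type(tracker).__name__)  # BOTSORT or BYTETracker, depending on tracker_type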
def tracking(bboxes, ffeats):
tracker_yaml = r"./trackers/cfg/botsort.yaml"
tracker = init_tracker(tracker_yaml)
TrackBoxes = np.empty((0, 9), dtype = np.float32)
TracksDict = {}
'''========================== Run the tracking step ============================='''
# dets and feats must stay in strict one-to-one correspondence
for dets, feats in zip(bboxes, ffeats):
det_tracking = Boxes(dets).cpu().numpy()
tracks = tracker.update(det_tracking, features=feats)
'''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
0 1 2 3 4 5 6 7 8
here frame_index may also be replaced by the video frame ID; box_index stays unchanged
'''
if len(tracks):
TrackBoxes = np.concatenate([TrackBoxes, tracks], axis=0)
FeatDict = {}
for track in tracks:
tid = int(track[8])
FeatDict.update({tid: feats[tid, :]})
frameID = tracks[0, 7]
# print(f"frameID: {int(frameID)}")
assert len(tracks) == len(FeatDict), f"Please check the func: tracker.update() at frameID({int(frameID)})"
TracksDict[f"frame_{int(frameID)}"] = {"feats":FeatDict}
return TrackBoxes, TracksDict
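# Usage sketch (the .data path is a placeholder): replay tracking offline from a dumped
# *_track.data file and inspect how many boxes the tracker keeps.
def _demo_tracking(fpath=r"D:\contrast\dataset\1_to_n\709\20240709-112658_6903148351833\1_track.data"):
    bboxes, ffeats, *_ = extract_data(fpath)
    tboxes, tracks_dict = tracking(bboxes, ffeats)
    print(f"tracked boxes: {tboxes.shape[0]}, frames with stored features: {len(tracks_dict)}")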
def do_tracker_tracking(fpath, save_dir):
bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath)
tboxes, feats_dict = tracking(bboxes, ffeats)
CamerType = os.path.basename(fpath).split('_')[0]
dirname = os.path.split(os.path.split(fpath)[0])[1]
if CamerType == '1':
vts = doFrontTracks(tboxes, feats_dict)
vts.classify()
plt = plot_frameID_y2(vts)
plt.savefig('front_y2.png')
# plt.close()
elif CamerType == '0':
vts = doBackTracks(tboxes, feats_dict)
vts.classify()
filename = dirname+'_' + CamerType
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, filename)
else:
print("Please check data file!")
def do_tracking(fpath, savedir):
'''
fpath: path of the .data file produced by each algorithm module
savedir: folder where the per-module outputs contained in fpath are reproduced
When analyzing a specific video, fpath and savedir must be given explicitly.
'''
# fpath = r'D:\contrast\dataset\1_to_n\709\20240709-102758_6971558612189\1_track.data'
# savedir = r'D:\contrast\dataset\result\20240709-102843_6958770005357_6971558612189\error_6971558612189'
imgpath, dfname = os.path.split(fpath)
CamerType = dfname.split('_')[0]
bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath)
tracking_output_path = os.path.join(imgpath, CamerType + '_tracking_output.data')
tracking_output_boxes, _ = read_tracking_output(tracking_output_path)
save_dir, basename = os.path.split(savedir)
if not os.path.exists(savedir):
os.makedirs(savedir)
''' Read the images corresponding to the track.data file in fpath '''
imgs = read_imgs(imgpath, CamerType)
''' Draw boxes on imgs and save; if the number of frames in trackerboxes does not match len(imgs), the original images are kept '''
imgs_dw = draw_tracking_boxes(imgs, trackerboxes)
if len(imgs_dw)==0:
imgs_dw = [img for img in imgs]
print(f"fpath: {imgpath}, savedir: {savedir}。Tracker输出的图像数和 imgs 中图像数不相等,无法一一匹配并画框")
for i in range(len(imgs_dw)):
img_savepath = os.path.join(savedir, CamerType + "_" + f"{i}.png")
# img = imgs_dw[i]
cv2.imwrite(img_savepath, imgs_dw[i])
if not isinstance(savedir, Path):
savedir = Path(savedir)
save_dir = savedir.parent
traj_graphic = basename + '_' + CamerType
if CamerType == '1':
vts = doFrontTracks(trackerboxes, tracker_feat_dict)
vts.classify()
plt = plot_frameID_y2(vts)
ftpath = save_dir.joinpath(f"{traj_graphic}_front_y2.png")
plt.savefig(str(ftpath))
plt.close()
elif CamerType == '0':
vts = doBackTracks(trackerboxes, tracker_feat_dict)
vts.classify()
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, traj_graphic)
else:
print("Please check data file!")
'''================== tracking() outputs of the on-site test =================='''
if CamerType == '1':
aline = cv2.imread("./shopcart/cart_tempt/board_ftmp_line.png")
elif CamerType == '0':
aline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
else:
print("Please check data file!")
bline = aline.copy()
annotator = TrackAnnotator(aline, line_width=2)
for track in trackingboxes:
annotator.plotting_track(track)
aline = annotator.result()
annotator = TrackAnnotator(bline, line_width=2)
if not isinstance(tracking_output_boxes, list):
tracking_output_boxes = [tracking_output_boxes]
for track in tracking_output_boxes:
annotator.plotting_track(track)
bline = annotator.result()
abimg = np.concatenate((aline, bline), axis = 1)
abH, abW = abimg.shape[:2]
cv2.line(abimg, (int(abW/2), 0), (int(abW/2), abH), (128, 255, 128), 2)
algpath = save_dir.joinpath(f"{traj_graphic}_Alg.png")
cv2.imwrite(algpath, abimg)
return
def main_loop():
del_barcode_file = 'D:/contrast/dataset/compairsonResult/deletedBarcode_20240709_pm.txt'
basepath = r'D:\contrast\dataset\1_to_n\709' # test-data folder path
SavePath = r'D:\contrast\dataset\result' # folder where results are saved
prefix = ["getout_", "input_", "error_"]
'''Get the paths related to the performance-test data'''
relative_paths = contrast_analysis(del_barcode_file, basepath, SavePath)
'''Loop over each test task'''
k = 0
for tuple_paths in relative_paths:
'''Create a folder to store the result images'''
namedirs = []
for data_path in tuple_paths:
base_name = os.path.basename(data_path).strip().split('_')
if len(base_name[-1]):
name = base_name[-1]
else:
name = base_name[0]
namedirs.append(name)
sdir = "_".join(namedirs)
savepath = os.path.join(SavePath, sdir)
if not os.path.exists(savepath):
os.makedirs(savepath)
for path in tuple_paths:
for filename in os.listdir(path):
fpath = os.path.join(path, filename)
if os.path.isfile(fpath) and filename.find("track.data")>0:
event_name = ''
'''Build the filename prefix for the saved results'''
for i, name in enumerate(namedirs):
if fpath.find(name)>0:
event_name = prefix[i] + name
break
spath = os.path.join(savepath, event_name)
do_tracking(fpath, spath)
k +=1
if k==1:
break
def main_fold():
save_dir = Path('./result')
if not save_dir.exists():
save_dir.mkdir(parents=True, exist_ok=True)
files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
for filename in os.listdir(files_path):
filename = '1_track.data'
fpath = os.path.join(files_path, filename)
if os.path.isfile(fpath) and filename.find("track.data")>0:
# do_tracker_tracking(fpath, save_dir)
do_tracking(fpath, save_dir)
if __name__ == "__main__":
try:
main_loop()
# main_fold()
except Exception as e:
print(f'Error: {e}')

tracking/rename.py Normal file
View File

@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 09:51:59 2024
@author: ym
"""
import os
def main():
directory = r'D:\DetectTracking\runs\detect'
directory = r'D:\DetectTracking\tracking\result\tracks'
suffix = '_'
for root, dirs, files in os.walk(directory):
for name in dirs:
old_name = os.path.join(root, name)
new_name = os.path.join(root, f"{name}{suffix}")
try:
os.rename(old_name, new_name)
except Exception as e:
print(f"Failed to rename directory '{old_name}': {e}")
for name in files:
old_name = os.path.join(root, name)
file, ext = os.path.splitext(name)
new_name = os.path.join(root, f"{file}{suffix}{ext}")
try:
os.rename(old_name, new_name)
except Exception as e:
print(f"Failed to rename file '{old_name}': {e}")
if __name__ == "__main__":
main()

View File

@ -12,7 +12,6 @@ import time
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial.distance import cdist
from pathlib import Path
@ -39,8 +38,7 @@ def detect_start_end(bboxes, features_dict, filename):
boxes = np.empty(shape=(0, 9), dtype = np.float)
if filename.find("back") >= 0:
vts = doBackTracks(bboxes, features_dict)
vtx = [t for t in vts if t.cls != 0]
vtx = [t for t in vts if t.cls != 0]
for track in vtx:
if track.moving_index.size:
boxes = np.concatenate((boxes, track.moving_index), axis=0)
@ -64,7 +62,7 @@ def detect_start_end(bboxes, features_dict, filename):
start = 0
return start, end
def save_subimgs(vts, file, TracksDict):
imgdir = Path(f'./result/imgs/{file}')
if not imgdir.exists():
@ -82,13 +80,14 @@ def save_subimgs(vts, file, TracksDict):
cv2.imwrite(str(imgdir) + f"/{tid}_{fid}_{bid}.png", img)
def have_tracked():
trackdict = r'./data/trackdicts'
trackdict = r'./data/trackdicts_20240608'
alltracks = []
k = 0
gt = Profile()
for filename in os.listdir(trackdict):
# filename = 'test_20240402-173935_6920152400975_back_174037372.pkl'
# filename = '加购_91.pkl'
filename = '6907149227609_20240508-174733_back_returnGood_70f754088050_425_17327712807.pkl'
filename = '6907149227609_20240508-174733_front_returnGood_70f754088050_425_17327712807.pkl'
file, ext = os.path.splitext(filename)
filepath = os.path.join(trackdict, filename)
@ -117,12 +116,10 @@ def have_tracked():
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, filename)
print(file+f" need time: {gt.dt:.2f}s")
# k += 1
# if k==1:
# break
k += 1
if k==1:
break
if len(alltracks):
drawFeatures(alltracks, save_dir)

View File

@ -1,223 +0,0 @@
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 14:03:03 2024
@author: ym
"""
import os
import cv2
import numpy as np
from pathlib import Path
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.plotting import Annotator, colors
from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load
from tracking.trackers import BOTSORT, BYTETracker
from tracking.dotrack.dotracks_back import doBackTracks
from tracking.dotrack.dotracks_front import doFrontTracks
from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
W, H = 1024, 1280
Mode = 'front' #'back'
def read_data_file(datapath):
with open(datapath, 'r') as file:
lines = file.readlines()
Videos = []
FrameBoxes, FrameFeats = [], []
boxes, feats = [], []
bboxes, ffeats = [], []
timestamp = []
t1 = None
for line in lines:
if line.find('CameraId') >= 0:
t = int(line.split(',')[1].split(':')[1])
timestamp.append(t)
if len(boxes) and len(feats):
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
boxes, feats = [], []
if t1 and t - t1 > 1e4:
Videos.append((FrameBoxes, FrameFeats))
FrameBoxes, FrameFeats = [], []
t1 = int(line.split(',')[1].split(':')[1])
if line.find('box') >= 0:
box = line.split(':', )[1].split(',')[:-1]
boxes.append(box)
bboxes.append(boxes)
if line.find('feat') >= 0:
feat = line.split(':', )[1].split(',')[:-1]
feats.append(feat)
ffeats.append(feat)
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
Videos.append((FrameBoxes, FrameFeats))
TimeStamp = np.array(timestamp, dtype = np.float32)
DimesDiff = np.diff((timestamp))
return Videos
def video2imgs(path):
vpath = os.path.join(path, "videos")
k = 0
have = False
for filename in os.listdir(vpath):
file, ext = os.path.splitext(filename)
imgdir = os.path.join(path, file)
if os.path.exists(imgdir):
continue
else:
os.mkdir(imgdir)
vfile = os.path.join(vpath, filename)
cap = cv2.VideoCapture(vfile)
i = 0
while True:
ret, frame = cap.read()
if not ret:
break
i += 1
imgp = os.path.join(imgdir, file+f"_{i}.png")
cv2.imwrite(imgp, frame)
print(filename+f": {i}")
cap.release()
k+=1
if k==1000:
break
def draw_boxes():
datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
VideosData = read_data_file(datapath)
bboxes = VideosData[0][0]
ffeats = VideosData[0][1]
videopath = r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4"
cap = cv2.VideoCapture(videopath)
i = 0
while True:
ret, frame = cap.read()
if not ret:
break
annotator = Annotator(frame.copy(), line_width=3)
boxes = bboxes[i]
for *xyxy, conf, cls in reversed(boxes):
label = f'{int(cls)}: {conf:.2f}'
color = colors(int(cls), True)
annotator.box_label(xyxy, label, color=color)
img = annotator.result()
imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1)
cv2.imwrite(imgpath, img)
print(f"Output: {i}")
i += 1
cap.release()
def init_tracker(tracker_yaml = None, bs=1):
"""
Initialize tracker for object tracking during prediction.
"""
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
return tracker
def tracking(bboxes, ffeats):
tracker_yaml = r"./trackers/cfg/botsort.yaml"
tracker = init_tracker(tracker_yaml)
track_boxes = np.empty((0, 9), dtype = np.float32)
features_dict = {}
'''==================== Run the tracking step ======================='''
for dets, feats in zip(bboxes, ffeats):
# needs to be re-sorted by frame_id
det_tracking = Boxes(dets).cpu().numpy()
tracks = tracker.update(det_tracking, feats)
if len(tracks):
track_boxes = np.concatenate([track_boxes, tracks], axis=0)
feat_dict = {int(x.idx): x.curr_feat for x in tracker.tracked_stracks if x.is_activated}
frame_id = tracks[0, 7]
features_dict.update({int(frame_id): feat_dict})
return det_tracking, features_dict
def main():
datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
VideosData = read_data_file(datapath)
bboxes = VideosData[0][0]
ffeats = VideosData[0][1]
bboxes, feats_dict = tracking(bboxes, ffeats)
if Mode == "front":
vts = doFrontTracks(bboxes, feats_dict)
vts.classify()
plt = plot_frameID_y2(vts)
plt.savefig('front_y2.png')
# plt.close()
else:
vts = doBackTracks(bboxes, feats_dict)
vts.classify()
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, filename)
if __name__ == "__main__":
filename = 'traj.png'
save_dir = Path('./result')
if not save_dir.exists():
save_dir.mkdir(parents=True, exist_ok=True)
main()

View File

@ -119,12 +119,14 @@ class BOTSORT(BYTETracker):
"""Returns an instance of KalmanFilterXYWH for object tracking."""
return KalmanFilterXYWH()
def init_track(self, dets, scores, cls, imgs):
def init_track(self, dets, scores, cls, imgs, features_keep):
"""Initialize track with detections, scores, and classes."""
if len(dets) == 0:
return []
if self.args.with_reid and self.encoder is not None:
features_keep = self.encoder.inference(imgs, dets)
if features_keep is None:
features_keep = self.encoder.inference(imgs, dets)
return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
else:
return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections

View File

@ -18,7 +18,12 @@ def dists_update(dists, strack_pool, detections):
blabel = np.array([int(stack.cls) for stack in detections])
amlabel = np.expand_dims(alabel, axis=1).repeat(len(detections),axis=1)
bmlabel = np.expand_dims(blabel, axis=0).repeat(len(strack_pool),axis=0)
dist_label = 1 - (bmlabel == amlabel)
mlabel = bmlabel == amlabel
iou_dist = matching.iou_distance(strack_pool, detections) > 0.1 # when box IoU > 0.9, the class constraint can be ignored
dist_label = (1 - mlabel) & iou_dist # different class and not strictly overlapping: apply the class-distance penalty
dist_label = 1 - mlabel
dists = np.where(dists > dist_label, dists, dist_label)
return dists
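# Minimal numeric sketch of the class gating above (toy arrays, no tracker objects):
# a track/detection pair of different classes has its matching cost floored to 1,
# so the assignment step can never pair them.
def _demo_dists_update_gate():
    import numpy as np
    dists = np.array([[0.2, 0.3], [0.4, 0.1]])  # cost matrix: 2 tracks x 2 detections
    alabel = np.array([0, 1])                   # track classes
    blabel = np.array([0, 0])                   # detection classes
    mlabel = np.expand_dims(blabel, 0).repeat(2, 0) == np.expand_dims(alabel, 1).repeat(2, 1)
    dist_label = 1 - mlabel                     # 1 where the classes differ
    print(np.where(dists > dist_label, dists, dist_label))  # -> [[0.2 0.3] [1.  1. ]]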
@ -103,6 +108,7 @@ class STrack(BaseTrack):
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.first_find = False
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
@ -127,6 +133,7 @@ class STrack(BaseTrack):
self.convert_coords(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.first_find = False
self.score = new_track.score
self.cls = new_track.cls
@ -207,7 +214,7 @@ class BYTETracker:
self.args.new_track_thresh = 0.5
def update(self, results, img=None):
def update(self, results, img=None, features=None):
"""Updates object tracker with new detections and returns tracked object bounding boxes."""
self.frame_id += 1
activated_stracks = []
@ -240,7 +247,7 @@ class BYTETracker:
cls_keep = cls[remain_inds]
cls_second = cls[inds_second]
detections = self.init_track(dets, scores_keep, cls_keep, img)
detections = self.init_track(dets, scores_keep, cls_keep, img, features)
# Add newly detected tracklets to tracked_stracks
unconfirmed = []
@ -283,7 +290,7 @@ class BYTETracker:
# Step 3: Second association, with low score detection boxes
# association the untrack to the low score detections
detections_second = self.init_track(dets_second, scores_second, cls_second, img)
detections_second = self.init_track(dets_second, scores_second, cls_second, img, features)
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
# TODO
@ -366,7 +373,7 @@ class BYTETracker:
output2 = [x.tlwh_to_tlbr(x._tlwh).tolist() + [x.track_id, x.score, x.cls, x.frame_id, x.idx]
for x in first_finded if x.first_find]
output = np.asarray(output1+output2, dtype=np.float32)
output = np.asarray(output1 + output2, dtype=np.float32)
return output
@ -382,7 +389,7 @@ class BYTETracker:
tracks = []
feats = []
for t in self.tracked_stracks:
if t.is_activated:
if t.is_activated or t.first_find:
track = t.tlbr.tolist() + [t.track_id, t.score, t.cls, t.idx]
feat = t.curr_feature
@ -398,7 +405,7 @@ class BYTETracker:
"""Returns a Kalman filter object for tracking bounding boxes."""
return KalmanFilterXYAH()
def init_track(self, dets, scores, cls, img=None):
def init_track(self, dets, scores, cls, img=None, feats=None):
"""Initialize object tracking with detections and scores using STrack algorithm."""
return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections
@ -455,7 +462,22 @@ class BYTETracker:
def remove_duplicate_stracks(stracksa, stracksb):
"""Remove duplicate stracks with non-maximum IOU distance."""
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist < 0.15)
#### ===================================== written by WQG
mlabel = []
if len(stracksa) and len(stracksb):
alabel = np.array([int(stack.cls) for stack in stracksa])
blabel = np.array([int(stack.cls) for stack in stracksb])
amlabel = np.expand_dims(alabel, axis=1).repeat(len(stracksb),axis=1)
bmlabel = np.expand_dims(blabel, axis=0).repeat(len(stracksa),axis=0)
mlabel = bmlabel == amlabel
if len(mlabel):
condt = (pdist<0.15) & mlabel # exclude only when the IoU distance is small enough and the classes match
else:
condt = pdist<0.15
pairs = np.where(condt)
dupa, dupb = [], []
for p, q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame

View File

@ -45,8 +45,7 @@ class ReIDInterface:
])
self.model = nn.DataParallel(model).to(self.device)
# self.model = nn.DataParallel(model).to(self.device)
self.model = model
self.model.load_state_dict(torch.load(self.model_path, map_location=self.device))
self.model.eval()

Binary file not shown.

Binary file not shown.

View File

@ -80,15 +80,32 @@ def plot_frameID_y2(vts):
return plt
def draw_all_trajectories(vts, edgeline, save_dir, filename):
def draw_all_trajectories(vts, edgeline, save_dir, file, draw5p=False):
'''Show the four result types'''
file, ext = os.path.splitext(filename)
# file, ext = os.path.splitext(filename)
# edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
# edgeline2 = edgeline1.copy()
# edgeline = np.concatenate((edgeline1, edgeline2), exis = 1)
'''1. 5-point trajectories of the tracks'''
trackpth = save_dir.parent /Path("trajectory")/ Path(f"{file}")
if not isinstance(save_dir, Path): save_dir = Path(save_dir)
''' Center trajectories of all tracks '''
img1, img2 = edgeline.copy(), edgeline.copy()
img1 = drawTrack(vts.tracks, img1)
img2 = drawTrack(vts.Residual, img2)
img = np.concatenate((img1, img2), axis = 1)
H, W = img.shape[:2]
cv2.line(img, (int(W/2), 0), (int(W/2), H), (128, 255, 128), 2)
imgpth = save_dir.joinpath(f"{file}_show.png")
cv2.imwrite(str(imgpth), img)
if not draw5p:
return
''' 5-point trajectories of the tracks '''
trackpth = save_dir / Path("trajectory") / Path(f"{file}")
if not trackpth.exists():
trackpth.mkdir(parents=True, exist_ok=True)
for track in vts.tracks:
@ -106,23 +123,9 @@ def draw_all_trajectories(vts, edgeline, save_dir, filename):
pth = trackpth.joinpath(f"{track.tid}_.png")
cv2.imwrite(str(pth), img)
'''2. Center trajectories of all tracks'''
img1, img2 = edgeline.copy(), edgeline.copy()
img1 = drawTrack(vts.tracks, img1)
img2 = drawTrack(vts.Residual, img2)
img = np.concatenate((img1, img2), axis = 1)
H, W = img.shape[:2]
cv2.line(img, (int(W/2), 0), (int(W/2), H), (128, 255, 128), 2)
pth = save_dir.joinpath(f"{file}_show.png")
cv2.imwrite(str(pth), img)
# =============================================================================
# '''3. Center trajectories of moving tracks'''
# filename2 = f"{file}_show_r.png"
@ -134,13 +137,11 @@ def draw_all_trajectories(vts, edgeline, save_dir, filename):
# =============================================================================
# '''5. Track time series: trajmin, trajmax, arearate, incartrate'''
# plt = drawtracefeat(vts)
# pth = save_dir.joinpath(f"{file}_x.png")
# plt.savefig(pth)
# plt.close('all')
# =============================================================================
'''5. Track time series: trajmin, trajmax, arearate, incartrate'''
# plt = drawtracefeat(vts)
# pth = save_dir.joinpath(f"{file}_x.png")
# plt.savefig(pth)
# plt.close('all')

View File

@ -15,17 +15,18 @@ def readDict(boxes, TracksDict):
for i in range(boxes.shape[0]):
tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])
feat = TracksDict[f"frame_{fid}"]["feats"][bid]
img = TracksDict[f"frame_{fid}"]["imgs"][bid]
trackdict = TracksDict[f"frame_{fid}"]
if "feats" in trackdict:
feat = trackdict["feats"][bid]
feats.append(feat)
box = TracksDict[f"frame_{fid}"]["boxes"][bid]
if "boxes" in trackdict:
box = trackdict["boxes"][bid]
assert (box[:4].astype(int) == boxes[i, :4].astype(int)).all(), f"Please check: frame_{fid}"
assert (box[:4].astype(int) == boxes[i, :4].astype(int)).all(), f"Please check: frame_{fid}"
feats.append(feat)
# img = TracksDict[fid][f'{bid}_img']
# cv2.imwrite(f'./data/imgs/{tid}_{fid}_{bid}.png', img)
if "imgs" in trackdict:
img = trackdict["imgs"][bid]
cv2.imwrite(f'./data/imgs/{tid}_{fid}_{bid}.png', img)
return np.asarray(feats, dtype=np.float32)
@ -59,13 +60,12 @@ def track_equal_track(atrack, btrack):
''' 2. Track feature similarity check'''
feat = np.concatenate((afeat, bfeat), axis=0)
emb_simil = 1-np.maximum(0.0, cdist(feat, feat, 'cosine'))
emb_ = 1-cdist(np.mean(afeat, axis=0)[None, :], np.mean(bfeat, axis=0)[None, :], 'cosine')
emb_simil = 1 - np.maximum(0.0, cdist(feat, feat, 'cosine'))
emb_ = 1 - np.maximum(0.0, cdist(np.mean(afeat, axis=0)[None, :], np.mean(bfeat, axis=0)[None, :], 'cosine'))/2
if emb_[0, 0]<0.66:
return False
''' 3. Track spatial IoU'''
alabel = np.array([0] * afids.size, dtype=np.int_)
blabel = np.array([1] * bfids.size, dtype=np.int_)
@ -93,7 +93,7 @@ def track_equal_track(atrack, btrack):
af, bf = afeat[a, :], bfeat[b, :]
emb_ab = 1-cdist(af[None, :], bf[None, :], 'cosine')
emb_ab = 1 - np.maximum(0.0, cdist(af[None, :], bf[None, :], 'cosine'))
xa1, ya1 = abox[0] - abox[2]/2, abox[1] - abox[3]/2
@ -113,7 +113,22 @@ def track_equal_track(atrack, btrack):
ious.append(inter/union)
embs.append(emb_ab[0, 0])
''' 4. Association with the same hand; how to fuse this code with the IoU part still needs further work'''
# ahands = np.array(atrack.Hands)
# bhands = np.array(btrack.Hands)
# ahids = ahands[:, 0]
# bhids = bhands[:, 0]
# interhid = set(ahids).intersection(set(bhids))
# for hid in interhid:
# aidx = ahands[:, 0] == hid
# bidx = bhands[:, 0] == hid
# ahfids = ahids[aidx, 1]
# bhfids = bhids[bidx, 1]
cont = False if len(interfid) else True # no fid intersection
cont1 = all(emb > 0.5 for emb in embs)

View File

@ -1,4 +1,4 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
import math
@ -284,5 +284,59 @@ def boxing_img(det, img, line_width=3):
imgx = annotator.result()
return imgx
def draw_tracking_boxes(imgs, tracks, scale=2):
'''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
0 1 2 3 4 5 6 7 8
Key points:
(1) the order of imgs corresponds to the fid field in tracks
(2) the images are at a reduced scale, so xyxy is divided by `scale` (2 by default)
'''
def array2list(bboxes):
track_fids = np.unique(bboxes[:, 7].astype(int))
track_fids.sort()
lboxes = []
for f_id in track_fids:
# print(f"The ID is: {t_id}")
idx = np.where(bboxes[:, 7] == f_id)[0]
box = bboxes[idx, :]
lboxes.append(box)
assert len(set(box[:, 4])) == len(box), "Please check!!!"
return lboxes
bboxes = array2list(tracks)
if len(bboxes)!=len(imgs):
return []
subimgs = []
for i, boxes in enumerate(bboxes):
annotator = Annotator(imgs[i].copy())
for *xyxy, tid, conf, cls, fid, bid in boxes:
label = f'id:{int(tid)}_{int(cls)}_{conf:.2f}'
if cls==0:
color = colors(int(cls), True)
elif tid>0 and cls!=0:
color = colors(int(tid), True)
else:
color = colors(19, True) # 19 is the last entry of the color palette
pt2 = [p/scale for p in xyxy]
annotator.box_label(pt2, label, color=color)
img = annotator.result()
subimgs.append(img)
return subimgs
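# Usage sketch: draw_tracking_boxes() expects imgs ordered by frame id and an N x 9 box
# array as documented above; it returns [] when the two cannot be paired frame-by-frame.
def _demo_draw_tracking_boxes(imgs, tracks, outdir="./result/annotated"):
    import os
    import cv2
    os.makedirs(outdir, exist_ok=True)
    annotated = draw_tracking_boxes(imgs, tracks, scale=2)
    if not annotated:
        print("frame count mismatch between tracks and imgs; nothing drawn")
    for i, img in enumerate(annotated):
        cv2.imwrite(os.path.join(outdir, f"{i}.png"), img)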

View File

@ -12,10 +12,12 @@ class Boxes:
"""Initialize the Boxes class."""
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
assert n in (6, 7, 8), f'expected `n` in [6, 7], but got {n}' # xyxyb, track_id, conf, cls
m, n = boxes.shape
assert n in (6, 7), f'expected `n` in [6, 7], but got {n}' # xyxy, track_id, conf, cls
'''Number each box; the index can be used to look up its corresponding feature'''
self.data = np.concatenate([boxes[:, :4], np.arange(m).reshape(-1, 1), boxes[:, 4:]], axis=-1)
self.data = boxes
self.orig_shape = orig_shape
def cpu(self):
@ -30,10 +32,9 @@ class Boxes:
"""Return the boxes in xyxy format."""
return self.data[:, :4]
@property
def xyxyb(self):
"""Return the boxes in xyxyb format."""
"""Return the boxes in xyxyb format."""
return self.data[:, :5]
@property

tracking/utils/read_data.py Normal file
View File

@ -0,0 +1,236 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 13:59:21 2024
func: extract_data()
Reads the data of each pipeline module; the interface is reworked from 马晓慧's read_pipeline_data.py
@author: ym
"""
import numpy as np
import re
import os
def str_to_float_arr(s):
# remove the trailing comma, if present
if s.endswith(','):
s = s[:-1]
# split the string and convert each element to float
float_array = [float(x) for x in s.split(",")]
return float_array
def find_samebox_in_array(arr, target):
for i, st in enumerate(arr):
if st[:4] == target[:4]:
return i
return -1
def extract_data(datapath):
bboxes, ffeats = [], []
trackerboxes = np.empty((0, 9), dtype=np.float64)
trackerfeats = np.empty((0, 256), dtype=np.float64)
boxes, feats, tboxes, tfeats = [], [], [], []
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
line = line.strip() # strip the trailing newline and any whitespace
if not line: # skip empty lines
continue
if line.find("CameraId")>=0:
if len(boxes): bboxes.append(np.array(boxes))
if len(feats): ffeats.append(np.array(feats))
if len(tboxes):
trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
if len(tfeats):
trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
boxes, feats, tboxes, tfeats = [], [], [], []
if line.find("box:") >= 0 and line.find("output_box:") < 0:
box = line[line.find("box:") + 4:].strip()
boxes.append(str_to_float_arr(box))
if line.find("feat:") >= 0:
feat = line[line.find("feat:") + 5:].strip()
feats.append(str_to_float_arr(feat))
if line.find("output_box:") >= 0:
box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
tboxes.append(box) # the 'output_box:' prefix and any whitespace have been stripped
index = find_samebox_in_array(boxes, box)
if index >= 0:
# feat_f = str_to_float_arr(input_feats[index])
feat_f = feats[index]
norm_f = np.linalg.norm(feat_f)
feat_f = feat_f / norm_f
tfeats.append(feat_f)
if len(boxes): bboxes.append(np.array(boxes))
if len(feats): ffeats.append(np.array(feats))
if len(tboxes): trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
if len(tfeats): trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
assert(len(bboxes)==len(ffeats)), "Error at Yolo output!"
assert(len(trackerboxes)==len(trackerfeats)), "Error at tracker output!"
tracker_feat_dict = {}
for i in range(len(trackerboxes)):
tid, fid, bid = int(trackerboxes[i, 4]), int(trackerboxes[i, 7]), int(trackerboxes[i, 8])
if f"frame_{fid}" not in tracker_feat_dict:
tracker_feat_dict[f"frame_{fid}"]= {"feats": {}}
tracker_feat_dict[f"frame_{fid}"]["feats"].update({bid: trackerfeats[i, :]})
boxes, trackingboxes= [], []
tracking_flag = False
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
line = line.strip() # strip the trailing newline and any whitespace
if not line: # skip empty lines
continue
if tracking_flag:
if line.find("tracking_") >= 0:
tracking_flag = False
else:
box = str_to_float_arr(line)
boxes.append(box)
if line.find("tracking_") >= 0:
tracking_flag = True
if len(boxes):
trackingboxes.append(np.array(boxes))
boxes = []
if len(boxes):
trackingboxes.append(np.array(boxes))
tracking_feat_dict = {}
for i, boxes in enumerate(trackingboxes):
for box in boxes:
tid, fid, bid = int(box[4]), int(box[7]), int(box[8])
if f"track_{tid}" not in tracking_feat_dict:
tracking_feat_dict[f"track_{tid}"]= {"feats": {}}
tracking_feat_dict[f"track_{tid}"]["feats"].update({f"{fid}_{bid}": tracker_feat_dict[f"frame_{fid}"]["feats"][bid]})
return bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict
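# Usage sketch (the path is a placeholder): parse one *_track.data dump and report how many
# YOLO frames, tracker boxes and tracking() tracks it contains.
def _demo_extract_data(fpath=r"D:\contrast\dataset\1_to_n\709\20240709-112658_6903148351833\1_track.data"):
    bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath)
    print(f"frames: {len(bboxes)}, tracker boxes: {trackerboxes.shape[0]}, tracks: {len(trackingboxes)}")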
def read_tracking_output(filepath):
boxes = []
feats = []
with open(filepath, 'r', encoding='utf-8') as file:
for line in file:
line = line.strip() # strip the trailing newline and any whitespace
if not line:
continue
if line.endswith(','):
line = line[:-1]
data = np.array([float(x) for x in line.split(",")])
if data.size == 9:
boxes.append(data)
if data.size == 256:
feats.append(data)
return np.array(boxes), np.array(feats)
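# Round-trip sketch of the line format the parser above accepts: comma-separated rows of
# 9 values (a tracking output box) or 256 values (a feature), optionally ending with a comma.
def _demo_read_tracking_output(tmpfile="./_demo_tracking_output.data"):
    box_line = ",".join(["1"] * 9) + ","
    feat_line = ",".join(["0.1"] * 256)
    with open(tmpfile, "w", encoding="utf-8") as f:
        f.write(box_line + "\n" + feat_line + "\n")
    boxes, feats = read_tracking_output(tmpfile)
    print(boxes.shape, feats.shape)  # (1, 9) (1, 256)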
def read_deletedBarcode_file(filePth):
with open(filePth, 'r', encoding='utf-8') as f:
lines = f.readlines()
split_flag, all_list = False, []
dict, barcode_list, similarity_list = {}, [], []
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for line in clean_lines:
stripped_line = line.strip()
if not stripped_line:
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(dict): all_list.append(dict)
split_flag = False
dict, barcode_list, similarity_list = {}, [], []
continue
# print(line)
label = line.split(':')[0]
value = line.split(':')[1]
if label == 'SeqDir':
dict['SeqDir'] = value
if label == 'Deleted':
dict['Deleted'] = value
if label == 'List':
split_flag = True
continue
if split_flag:
barcode_list.append(label)
similarity_list.append(value)
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(dict): all_list.append(dict)
return all_list
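# Round-trip sketch of the record format the parser above expects: SeqDir/Deleted fields,
# then a 'List:' label followed by "barcode: similarity" lines, terminated by a blank line.
# Field names come from the parser; all values below are illustrative.
def _demo_read_deletedBarcode_file(tmpfile="./_demo_deletedBarcode.txt"):
    sample = ("SeqDir: 20240709-102843_\n"
              "Deleted: 6971558612189\n"
              "List:\n"
              "6958770005357: 0.91\n"
              "6971558612189: 0.82\n"
              "\n")
    with open(tmpfile, "w", encoding="utf-8") as f:
        f.write(sample)
    print(read_deletedBarcode_file(tmpfile))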
if __name__ == "__main__":
files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
# iterate over all files and directories under the path
for filename in os.listdir(files_path):
filename = '1_track.data'
file_path = os.path.join(files_path, filename)
if os.path.isfile(file_path) and filename.find("track.data")>0:
extract_data(file_path)
print("Done")

View File

@ -0,0 +1,250 @@
# -*- coding: utf-8 -*-
"""
Created on Tue May 21 15:25:23 2024
Reads the data of each pipeline module; main code written by 马晓慧
@author: ieemoo-zl003
"""
import os
import numpy as np
# replace with your own directory path
files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
def str_to_float_arr(s):
# remove the trailing comma, if present
if s.endswith(','):
s = s[:-1]
# split the string and convert each element to float
float_array = np.array([float(x) for x in s.split(",")])
return float_array
def extract_tracker_input_boxes_feats(file_name):
boxes = []
feats = []
with open(file_name, 'r', encoding='utf-8') as file:
for line in file:
line = line.strip() # strip the trailing newline and any whitespace
# skip empty lines
if not line:
continue
# check whether the line starts with 'box:' or 'feat:'
if line.find("box:") >= 0 and line.find("output_box:") < 0:
box = line[line.find("box:") + 4:].strip()
boxes.append(str_to_float_arr(box)) # the 'box:' prefix and any whitespace have been stripped
if line.find("feat:") >= 0:
feat = line[line.find("feat:") + 5:].strip()
feats.append(str_to_float_arr(feat)) # the 'feat:' prefix and any whitespace have been stripped
return np.array(boxes), np.array(feats)
def find_string_in_array(arr, target):
"""
Find the row (index) of the target string in a string array.
Parameters:
arr -- array of strings
target -- the target string to look for
Returns:
Index of the target string in the array; -1 if not found.
"""
tg = [float(t) for k, t in enumerate(target.split(',')) if k<4][:4]
for i, st in enumerate(arr):
st = [float(s) for k, s in enumerate(st.split(',')) if k<4][:4]
if st == tg:
return i
# if st[:20] == target[:20]:
# return i
return -1
def find_samebox_in_array(arr, target):
for i, st in enumerate(arr):
if all(st[:4] == target[:4]):
return i
return -1
def extract_tracker_output_boxes_feats(read_file_name):
input_boxes, input_feats = extract_tracker_input_boxes_feats(read_file_name)
boxes = []
feats = []
with open(read_file_name, 'r', encoding='utf-8') as file:
for line in file:
line = line.strip() # strip the trailing newline and any whitespace
# skip empty lines
if not line:
continue
# check whether the line starts with 'output_box:'
if line.find("output_box:") >= 0:
box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
boxes.append(box) # the 'output_box:' prefix and any whitespace have been stripped
index = find_samebox_in_array(input_boxes, box)
if index >= 0:
# feat_f = str_to_float_arr(input_feats[index])
feat_f = input_feats[index]
norm_f = np.linalg.norm(feat_f)
feat_f = feat_f / norm_f
feats.append(feat_f)
return input_boxes, input_feats, np.array(boxes), np.array(feats)
def extract_tracking_output_boxes_feats(read_file_name):
tracker_boxes, tracker_feats, input_boxes, input_feats = extract_tracker_output_boxes_feats(read_file_name)
boxes = []
feats = []
tracking_flag = False
with open(read_file_name, 'r', encoding='utf-8') as file:
for line in file:
line = line.strip() # strip the trailing newline and any whitespace
# skip empty lines
if not line:
continue
if tracking_flag:
if line.find("tracking_") >= 0:
tracking_flag = False
else:
box = str_to_float_arr(line)
boxes.append(box)
index = find_samebox_in_array(input_boxes, box)
if index >= 0:
feats.append(input_feats[index])
# check whether the line starts with 'tracking_'
if line.find("tracking_") >= 0:
tracking_flag = True
assert(len(tracker_boxes)==len(tracker_feats)), "Error at Yolo output"
assert(len(input_boxes)==len(input_feats)), "Error at tracker output"
assert(len(boxes)==len(feats)), "Error at tracking output"
return tracker_boxes, tracker_feats, input_boxes, input_feats, np.array(boxes), np.array(feats)
def read_tracking_input(datapath):
with open(datapath, 'r') as file:
lines = file.readlines()
data = []
for line in lines:
data.append([s for s in line.split(',') if len(s)>=3])
# data.append([float(s) for s in line.split(',') if len(s)>=3])
# data = np.array(data, dtype = np.float32)
try:
data = np.array(data, dtype = np.float32)
except Exception as e:
data = np.array([], dtype = np.float32)
print('DataError for func: read_tracking_input()')
return data
def read_tracker_input(datapath):
with open(datapath, 'r') as file:
lines = file.readlines()
Videos = []
FrameBoxes, FrameFeats = [], []
boxes, feats = [], []
timestamp = []
t1 = None
for line in lines:
if line.find('CameraId') >= 0:
t = int(line.split(',')[1].split(':')[1])
timestamp.append(t)
if len(boxes) and len(feats):
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
boxes, feats = [], []
if t1 and t - t1 > 1e3:
Videos.append((FrameBoxes, FrameFeats))
FrameBoxes, FrameFeats = [], []
t1 = int(line.split(',')[1].split(':')[1])
if line.find('box') >= 0:
box = line.split(':', )[1].split(',')[:-1]
boxes.append(box)
if line.find('feat') >= 0:
feat = line.split(':', )[1].split(',')[:-1]
feats.append(feat)
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
Videos.append((FrameBoxes, FrameFeats))
# TimeStamp = np.array(timestamp, dtype = np.int64)
# DimesDiff = np.diff((TimeStamp))
# sorted_indices = np.argsort(TimeStamp)
# TimeStamp_sorted = TimeStamp[sorted_indices]
# DimesDiff_sorted = np.diff((TimeStamp_sorted))
return Videos
def main():
files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
# iterate over all files and directories under the path
for filename in os.listdir(files_path):
# build the full file path
file_path = os.path.join(files_path, filename)
if os.path.isfile(file_path) and filename.find("track.data")>0:
tracker_boxes, tracker_feats, tracking_boxes, tracking_feats, output_boxes, output_feats = extract_tracking_output_boxes_feats(file_path)
print("Done")
if __name__ == "__main__":
main()