modify dotrack module

王庆刚
2024-06-03 15:25:39 +08:00
parent d1ea304491
commit f90ef72cbf
19 changed files with 502 additions and 420 deletions

1
.gitignore vendored
View File

@ -54,6 +54,7 @@ VOC/
# Neural Network weights -----------------------------------------------------------------------------------------------
*.weights
*.pt
*.pth
*.pb
*.onnx
*.engine

218
featureVal.py Normal file
View File

@ -0,0 +1,218 @@
# -*- coding: utf-8 -*-
"""
Created on Fri May 31 14:50:21 2024
@author: ym
"""
import cv2
import numpy as np
import torch
from scipy.spatial.distance import cdist
from tracking.trackers.reid.config import config as ReIDConfig
from tracking.trackers.reid.reid_interface import ReIDInterface
ReIDEncoder = ReIDInterface(ReIDConfig)
def read_data_file(datapath):
with open(datapath, 'r') as file:
lines = file.readlines()
Videos = []
FrameBoxes, FrameFeats = [], []
boxes, feats = [], []
bboxes, ffeats = [], []
timestamp = []
t1 = None
for line in lines:
if line.find('CameraId') >= 0:
t = int(line.split(',')[1].split(':')[1])
timestamp.append(t)
if len(boxes) and len(feats):
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
boxes, feats = [], []
if t1 and t - t1 > 1e4:
Videos.append((FrameBoxes, FrameFeats))
FrameBoxes, FrameFeats = [], []
t1 = int(line.split(',')[1].split(':')[1])
if line.find('box') >= 0:
box = line.split(':', )[1].split(',')[:-1]
boxes.append(box)
bboxes.append(boxes)
if line.find('feat') >= 0:
feat = line.split(':', )[1].split(',')[:-1]
feats.append(feat)
ffeats.append(feat)
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
Videos.append((FrameBoxes, FrameFeats))
TimeStamp = np.array(timestamp, dtype = np.float32)
TimesDiff = np.diff(timestamp)
return Videos
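# ---------------------------------------------------------------------------
# A hedged sketch of the .data line format read_data_file() appears to parse
# (layout inferred from the code above; field names other than CameraId, box
# and feat are illustrative assumptions):
#
#   CameraId:0,TimeStamp:1717399547123
#   box:x1,y1,x2,y2,...,
#   feat:0.0123,-0.0456,...,
#
# 'box'/'feat' payloads end with a trailing comma, hence the [:-1] slice; a
# jump of more than 1e4 between consecutive timestamps closes the current
# (FrameBoxes, FrameFeats) pair and starts a new video segment.
# ---------------------------------------------------------------------------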
def inference_image(image, detections):
H, W, _ = np.shape(image)
imgs = []
batch_patches = []
patches = []
for d in range(np.size(detections, 0)):
tlbr = detections[d, :4].astype(np.int_)
tlbr[0] = max(0, tlbr[0])
tlbr[1] = max(0, tlbr[1])
tlbr[2] = min(W - 1, tlbr[2])
tlbr[3] = min(H - 1, tlbr[3])
img1 = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]
img = img1[:, :, ::-1].copy() # the model expects RGB inputs
patch = ReIDEncoder.transform(img)
imgs.append(img1)
# patch = patch.to(device=self.device).half()
if str(ReIDEncoder.device) != "cpu":
patch = patch.to(device=ReIDEncoder.device).half()
else:
patch = patch.to(device=ReIDEncoder.device)
patches.append(patch)
if (d + 1) % ReIDEncoder.batch_size == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
features = np.zeros((0, ReIDEncoder.embedding_size))
for patches in batch_patches:
pred = ReIDEncoder.model(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
return imgs, features
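# Hedged usage sketch (inputs illustrative): only the first four columns of
# detections are used, as tlbr pixel coordinates; crops are batched by
# ReIDEncoder.batch_size and embedded into an (N, embedding_size) matrix.
#
#   frame = cv2.imread("frame_0.png")                  # any BGR image
#   dets = np.array([[10., 20., 110., 220., 0., .9]])  # x1, y1, x2, y2, ...
#   crops, feats = inference_image(frame, dets)
#   assert feats.shape == (1, ReIDEncoder.embedding_size)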
def test_dog():
datapath = r"D:\datasets\ym\Img_ResnetData\dog_224x224\dog_224x224.txt"
with open(datapath, 'r') as file:
lines = file.readlines()
dlist = lines[0].split(',')
dfloat = [float(d) for d in dlist]
afeat = np.array(dfloat).reshape(1, -1)
imgpath = r"D:\datasets\ym\Img_ResnetData\dog_224x224\dog_224x224.jpg"
image = cv2.imread(imgpath)
patches = []
img = image[:, :, ::-1].copy() # the model expects RGB inputs
patch = ReIDEncoder.transform(img)
patch = patch.to(device=ReIDEncoder.device)
patches.append(patch)
patches = torch.stack(patches, dim=0)
pred = ReIDEncoder.model(patches)
pred[torch.isinf(pred)] = 1.0
bfeat = pred.cpu().data.numpy()
aafeat = afeat / np.linalg.norm(afeat, ord=2, axis=1, keepdims=True)
bbfeat = bfeat / np.linalg.norm(bfeat, ord=2, axis=1, keepdims=True)
cost_matrix = 1 - np.maximum(0.0, cdist(aafeat, bbfeat, 'cosine'))
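# Note: scipy's 'cosine' metric already returns 1 - cos_sim, which is
# non-negative, so the np.maximum clip is a no-op and cost_matrix here is
# exactly the cosine similarity between afeat and bfeat.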
print("Done!!!")
def main():
imgpath = r"D:\datasets\ym\Img_ResnetData\20240531-103547_0354b1cb-53fa-48de-86cd-ac3c5b127ada_6921168593576\3568800050000_0.jpeg"
datapath = r"D:\datasets\ym\Img_ResnetData\0_tracker_inout.data"
savepath = r"D:\datasets\ym\Img_ResnetData\result"
image = cv2.imread(imgpath)
Videos = read_data_file(datapath)
bboxes, afeats = Videos[0][0][0], Videos[0][1][0]
imgs, bfeats = inference_image(image, bboxes)
aafeats = afeats / np.linalg.norm(afeats, ord=2, axis=1, keepdims=True)
bbfeats = bfeats / np.linalg.norm(bfeats, ord=2, axis=1, keepdims=True)
cost_matrix = 1 - np.maximum(0.0, cdist(aafeats, bbfeats, 'cosine'))
for i, img in enumerate(imgs):
cv2.imwrite(savepath + f"\\{i}.png", img)  # escaped backslash: a single "\{" is an invalid escape sequence
print("Done!!!!")
if __name__ == '__main__':
main()
# test_dog()

View File

@ -110,8 +110,6 @@ def inference_image(image, detections):
def init_trackers(tracker_yaml = None, bs=1):
"""
Initialize trackers for object tracking during prediction.
@ -177,7 +175,7 @@ def run(
save_dir = Path(project) / Path(source).stem
if save_dir.exists():
print(Path(source).stem)
return
# return
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
@ -215,6 +213,9 @@ def run(
track_boxes = np.empty((0, 9), dtype = np.float32)
det_boxes = np.empty((0, 9), dtype = np.float32)
DetBoxes = np.empty((0, 6), dtype = np.float32)
TrackerBoxes = np.empty((0, 9), dtype = np.float32)
TrackerFeats = np.empty((0, 256), dtype = np.float32)
features_dict = {}
TracksDict = {}
@ -267,38 +268,40 @@ def run(
det = det.cpu().numpy()
det = np.concatenate([det[:, :4], np.arange(nd).reshape(-1, 1), det[:, 4:]], axis=-1)
DetBoxes = np.concatenate([DetBoxes, det[:, :6]], axis=0)
#
def static_estimate(box1, box2, TH1=8, TH2=12):
dij_abs = max(np.abs(box1 - box2))
dij_euc = max([np.linalg.norm((box1[:2] - box2[:2])),
np.linalg.norm((box1[2:4] - box2[2:4]))
])
if dij_abs < TH1 and dij_euc < TH2:
return True
else:
return False
## ============================================================ Reuse features of identical boxes across consecutive frames
# def static_estimate(box1, box2, TH1=8, TH2=12):
# dij_abs = max(np.abs(box1 - box2))
# dij_euc = max([np.linalg.norm((box1[:2] - box2[:2])),
# np.linalg.norm((box1[2:4] - box2[2:4]))
# ])
# if dij_abs < TH1 and dij_euc < TH2:
# return True
# else:
# return False
nw = 3 # window size for checking back over previous frames
nf = len(BoxesFeats) # number of frames already detected and feature-extracted
feat_curr = [None] * nd # nd: number of boxes detected in the current frame
for ii in range(nd):
box = det[ii, :4]
# nw = 3 # window size for checking back over previous frames
# nf = len(BoxesFeats) # number of frames already detected and feature-extracted
# feat_curr = [None] * nd # nd: number of boxes detected in the current frame
# for ii in range(nd):
# box = det[ii, :4]
kk=1
feat = None
while kk <= nw and nf>=kk:
ki = -1 * kk
boxes_ = BoxesFeats[ki][0]
feats_ = BoxesFeats[ki][1]
# kk=1
# feat = None
# while kk <= nw and nf>=kk:
# ki = -1 * kk
# boxes_ = BoxesFeats[ki][0]
# feats_ = BoxesFeats[ki][1]
flag = [jj for jj in range(len(boxes_)) if static_estimate(box, boxes_[jj, :4])]
if len(flag) == 1:
feat = feats_[flag[0]]
break
kk += 1
if feat is not None:
feat_curr[ii] = feat
# flag = [jj for jj in range(len(boxes_)) if static_estimate(box, boxes_[jj, :4])]
# if len(flag) == 1:
# feat = feats_[flag[0]]
# break
# kk += 1
# if feat is not None:
# feat_curr[ii] = feat
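# Micro-example of the (now disabled) check above: for box1 = [100, 100, 200, 200]
# and box2 = [104, 103, 205, 201], dij_abs = max|box1 - box2| = 5 < TH1 (8) and
# dij_euc = max(5.0, 5.1) < TH2 (12), so the boxes would be judged static and
# the earlier frame's feature reused for the current box.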
@ -319,7 +322,11 @@ def run(
'''================== 1. Store the dets/subimgs/features dict ============='''
imgs, features = inference_image(im0, tracks)
BoxesFeats.append((tracks, features))
TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
imgdict = {}
@ -532,7 +539,10 @@ def main_loop(opt):
# r"D:\datasets\ym\广告板遮挡测试\8\2500441577966_20240508-175946_front_addGood_70f75407b7ae_155_17788571404.mp4"
# ]
# files = [r"D:\datasets\ym\videos\标记视频\test_20240402-173935_6920152400975_back_174037372.mp4"]
# files = [r"D:\datasets\ym\视频\20240529\110518062-090ac04c-0a8c-479f-bc18-cb3553c90683-0_seek0.017962635633665514.mp4"]
files = [r"D:\datasets\ym\视频\20240529\110518060-550b7c4d-9946-4aa4-9131-81008692cd65-1_seek0.7670042724609232.mp4"]
for file in files:
optdict["source"] = file
run(**optdict)

View File

@ -84,7 +84,6 @@ class Track:
boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
0 1 2 3 4 5 6 7 8
'''
# What happens if the following conditions are not met?
# assert len(set(boxes[:, 4].astype(int))) == 1, "For a Track, track_id more than 1"
# assert len(set(boxes[:, 6].astype(int))) == 1, "For a Track, class number more than 1"
@ -110,7 +109,8 @@ class Track:
'''Coordinates of the 5 key points (center, top-left, top-right, bottom-left, bottom-right)'''
self.compute_cornpoints()
'''Trajectory features of the 5 key points (can be implemented in subclasses to reduce the cost of sequential processing)'''
'''Trajectory features of the 5 key points (can be implemented in subclasses to reduce the cost of sequential processing):
trajectory features of (center, top-left, top-right, bottom-left, bottom-right)'''
self.compute_cornpts_feats()
@ -341,8 +341,6 @@ class Track:
return
class doTracks:
def __init__(self, bboxes, TracksDict):
@ -394,42 +392,7 @@ class doTracks:
lfeats.append(afeat)
return lfeats
'''
def classify(self):
tracks = self.tracks
# extract the hands' frame_ids and associate them with the moving targets' frame_ids
hand_tracks = [t for t in tracks if t.cls==0]
self.Hands.extend(hand_tracks)
tracks = self.sub_tracks(tracks, hand_tracks)
# extract kid tracks and compute their state: left, right, incart
kid_tracks = [t for t in tracks if t.cls==9]
kid_states = [self.kid_state(t) for t in kid_tracks]
self.Kids = [x for x in zip(kid_tracks, kid_states)]
tracks = self.sub_tracks(tracks, kid_tracks)
static_tracks = [t for t in tracks if t.frnum>1 and t.is_static()]
self.Static.extend(static_tracks)
# tracks remaining after static targets are removed
tracks = self.sub_tracks(tracks, static_tracks)
return tracks
'''
def similarity(self):
nt = len(self.tracks)

View File

@ -23,34 +23,6 @@ class doBackTracks(doTracks):
self.shopcart = ShoppingCart(bboxes)
# =============================================================================
# def array2list(self):
# ''' 0, 1, 2, 3, 4, 5, 6, 7, 8
# bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# lboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# '''
#
# track_ids = set(self.bboxes[:, 4])
# lboxes = []
# for t_id in track_ids:
# idx = np.where(self.bboxes[:, 4] == t_id)[0]
# box = self.bboxes[idx, :]
#
# x = (box[:, 0] + box[:, 2]) / 2
# y = (box[:, 1] + box[:, 3]) / 2
#
# # box: [x, y, w, h, track_id, score, cls, frame_index]
# box[:, 2] = box[:, 2] - box[:, 0]
# box[:, 3] = box[:, 3] - box[:, 1]
# box[:, 0] = x
# box[:, 1] = y
#
# lboxes.append(box)
#
#
# return lboxes
# =============================================================================
def classify(self):
'''Purpose: classify the elements of tracks'''

View File

@ -122,25 +122,6 @@ class doFrontTracks(doTracks):
"""
Merge targets that have different track ids but may be the same product
"""
# =============================================================================
# mergedTracks = []
# alist = [t for t in Residual]
# while alist:
# atrack = alist[0]
# cur_list = []
# cur_list.append(atrack)
# alist.pop(0)
#
# blist = [b for b in alist]
# alist = []
# for btrack in blist:
# if track_equal_track(atrack, btrack, self.TracksDict):
# cur_list.append(btrack)
# else:
# alist.append(btrack)
#
# mergedTracks.append(cur_list)
# =============================================================================
mergedTracks = self.base_merge_tracks(Residual)
oldtracks, newtracks = [], []
@ -175,27 +156,3 @@ class doFrontTracks(doTracks):
return merged
# =============================================================================
# def array2list(self):
# '''
# Convert bboxes into a list of per-track arrays
# bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# Return
# lboxes: a list whose elements share one track_id, in x1y1x2y2 format:
# [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# '''
# track_ids = set(self.bboxes[:, 4])
# lboxes = []
# for t_id in track_ids:
# # print(f"The ID is: {t_id}")
# idx = np.where(self.bboxes[:, 4] == t_id)[0]
# box = self.bboxes[idx, :]
#
# lboxes.append(box)
#
# return lboxes
# =============================================================================

View File

@ -48,7 +48,6 @@ class backTrack(Track):
# self.PCA()
def isimgborder(self, BoundPixel=10, BoundThresh=0.3):
x1, y1 = self.cornpoints[:,2], self.cornpoints[:,3],
@ -74,69 +73,12 @@ class backTrack(Track):
MarginState = [condtA, condtB, condtC, condtD, condtE, condtF, condtG]
return isCornpoint, MarginState
# =============================================================================
# def PositionState(self, camerType="back"):
# '''
# Migrated to the base class
# camerType: back, rear camera
# front, front camera
# '''
# if camerType=="front":
# incart = cv2.imread("./shopcart/cart_tempt/incart.png", cv2.IMREAD_GRAYSCALE)
# outcart = cv2.imread("./shopcart/cart_tempt/outcart.png", cv2.IMREAD_GRAYSCALE)
# else:
# incart = cv2.imread("./shopcart/cart_tempt/incart_ftmp.png", cv2.IMREAD_GRAYSCALE)
# outcart = cv2.imread("./shopcart/cart_tempt/outcart_ftmp.png", cv2.IMREAD_GRAYSCALE)
#
# xc, yc = self.cornpoints[:,0].clip(0,self.imgshape[0]-1).astype(np.int64), self.cornpoints[:,1].clip(0,self.imgshape[1]-1).astype(np.int64)
# x1, y1 = self.cornpoints[:,6].clip(0,self.imgshape[0]-1).astype(np.int64), self.cornpoints[:,7].clip(0,self.imgshape[1]-1).astype(np.int64)
# x2, y2 = self.cornpoints[:,8].clip(0,self.imgshape[0]-1).astype(np.int64), self.cornpoints[:,9].clip(0,self.imgshape[1]-1).astype(np.int64)
#
# # print(self.tid)
# Cent_inCartnum = np.count_nonzero(incart[(yc, xc)])
# LB_inCartnum = np.count_nonzero(incart[(y1, x1)])
# RB_inCartnum = np.count_nonzero(incart[(y2, x2)])
#
# Cent_outCartnum = np.count_nonzero(outcart[(yc, xc)])
# LB_outCartnum = np.count_nonzero(outcart[(y1, x1)])
# RB_outCartnum = np.count_nonzero(outcart[(y2, x2)])
#
# '''Track entirely inside the cart: the bottom-left and bottom-right points have zero intersection with outcart'''
# self.isWholeInCart = False
# if LB_outCartnum + RB_outCartnum == 0:
# self.isWholeInCart = True
#
# '''Track entirely outside the cart: the bottom-left and center points have zero intersection with incart,
# or the bottom-right and center points have zero intersection with incart
# '''
# self.isWholeOutCart = False
# if Cent_inCartnum + LB_inCartnum == 0 or Cent_inCartnum + RB_inCartnum == 0:
# self.isWholeOutCart = True
#
#
# self.Cent_isIncart = False
# self.LB_isIncart = False
# self.RB_isIncart = False
# if Cent_inCartnum: self.Cent_isIncart = True
# if LB_inCartnum: self.LB_isIncart = True
# if RB_inCartnum: self.RB_isIncart = True
#
# self.posState = self.Cent_isIncart+self.LB_isIncart+self.RB_isIncart
# =============================================================================
def PCA(self):
self.pca = PCA()
X = self.cornpoints[:, 0:2]
self.pca.fit(X)
def compute_ious_feat(self):
@ -194,96 +136,8 @@ class backTrack(Track):
self.feature_ious = (incart_iou, outcart_iou, cartboarder_iou, maxbox_iou, minbox_iou)
self.incartrates = incartrates
def compute_static_fids(self, thresh1 = 12, thresh2 = 3):
'''
Compute (start_frame_id, end_frame_id) for the relatively static segments of the track's trajectory.
thresh1: pixel threshold for whether the target center is static between two adjacent frames.
thresh2: number of consecutive frames the target must be captured as static; with thresh2 = 3,
at least 4 consecutive points, producing 3 adjacent differences all below thresh1,
are required to declare continuous stillness.
Interpolation is used during processing, so start/end are not the corresponding frame indices in self.boxes.
'''
BoundPixel = 8
x1, y1 = self.cornpoints[:,2], self.cornpoints[:,3],
x2, y2 = self.cornpoints[:,8], self.cornpoints[:,9]
cont1 = sum(abs(x1)<BoundPixel) > 3
# cont2 = sum(abs(y1)<BoundPixel) > 3
cont3 = sum(abs(x2-self.imgshape[0])<BoundPixel) > 3
# cont4 = sum(abs(y2-self.imgshape[1])<BoundPixel) > 3
cont = not(cont1 or cont3)
## ============== Next step: use the center point, and pick the corner point with the smallest motion as the reference point
static_index = []
if self.frnum>=2 and cont:
x1 = self.boxes[1:,7]
x2 = [i for i in range(int(min(x1)), int(max(x1)+1))]
dist_adjc = np.interp(x2, x1, self.trajmin)
# dist_adjc = self.trajmin
static_thresh = (dist_adjc < thresh1)[:, None].astype(np.uint8)
static_cnts, _ = cv2.findContours(static_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for cnt in static_cnts:
_, start, _, num = cv2.boundingRect(cnt)
end = start + num
if num < thresh2:
continue
static_index.append((start, end))
static_index = np.array(static_index)
if static_index.size:
indx = np.argsort(static_index[:, 0])
static_index = static_index[indx]
return static_index
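# Hedged micro-example of the run-finding above (values illustrative): with
# frame ids x1 = [2, 4, 7] and self.trajmin = [1.0, 20.0, 2.0], np.interp
# resamples the adjacent-point distances onto every integer frame in [2, 7];
# the (dist_adjc < thresh1) mask is then treated as an Nx1 binary image, and
# cv2.findContours returns each static run as a bounding rect whose y-offset
# and height give that run's (start, end).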
def compute_dynamic_fids(self, thresh1 = 12, thresh2 = 3):
'''
Compute (start_frame_id, end_frame_id) for the moving segments of the track's trajectory.
thresh1: pixel threshold for whether the target center is moving between two adjacent frames.
thresh2: number of consecutive frames the target must be captured as moving.
Goals:
1. compute the trajectory direction
2. compute the correlation with hand motion
'''
moving_index = []
if self.frnum>=2:
x1 = self.boxes[1:,7]
x2 = [i for i in range(int(min(x1)), int(max(x1)+1))]
dist_adjc = np.interp(x2, x1, self.trajmin)
moving_thresh = (dist_adjc >= thresh1)[:, None].astype(np.uint8)
moving_cnts, _ = cv2.findContours(moving_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for cnt in moving_cnts:
_, start, _, num = cv2.boundingRect(cnt)
if num < thresh2:
continue
end = start + num
moving_index.append((start, end))
# =============================================================================
# '''========= outputting frame ids here is not quite appropriate ========='''
# moving_fids = []
# for i in range(len(moving_index)):
# i1, i2 = moving_index[i]
# fid1, fid2 = boxes[i1, 7], boxes[i2, 7]
# moving_fids.append([fid1, fid2])
# moving_fids = np.array(moving_fids)
# =============================================================================
moving_index = np.array(moving_index)
if moving_index.size:
indx = np.argsort(moving_index[:, 0])
moving_index = moving_index[indx]
return moving_index
def compute_static_dynamic_fids(self):
if self.MarginState[0] or self.MarginState[2]:
@ -319,17 +173,6 @@ class backTrack(Track):
return static, dynamic
# =============================================================================
# static_dynamic_fids = []
# for traj in self.trajectory:
# static, dynamic = self.pt_state_fids(traj)
# static_dynamic_fids.append((static, dynamic))
#
# return static_dynamic_fids
# =============================================================================
def is_static(self):
'''Static case 1: the minimal relative motion trajectory of the target's key points < 0.2; the metric value runs large
@ -354,7 +197,6 @@ class backTrack(Track):
# and self.posState >= 2
# and self.TrajFeat[0] < 240 \
'''Static case 3: the target is static in both its initial and final states'''
condt3 = self.static_index.shape[0] >= 2 \
and self.static_index[0, 0] <= 2 \
@ -370,50 +212,23 @@ class backTrack(Track):
and self.static_index[0, 1] >= 6 \
and self.static_index[-1, 0] <= self.frnum-5 \
and self.static_index[-1, 1] >= self.frnum-2
condt = condt1 or condt2 or condt3 or condt4
return condt
# =============================================================================
# track1 = [t for t in tracks if t.TrajFeat[5] < 0.2
# or t.TrajFeat[3] < 120
# ]
#
# track2 = [t for t in tracks if t.static_index.size > 0
# and t.static_index[0, 0] <= 2
# and t.TrajFeat[5] < 0.5]
#
# track3 = [t for t in tracks if t.static_index.shape[0] >= 2
# and t.static_index[0, 0] <= 2
# and t.static_index[-1, 1] >= t.frnum-3]
#
# track12 = self.join_tracks(track1, track2)
#
# '''extract the tracks in a static state'''
# static_tracks = self.join_tracks(track12, track3)
# self.Static.extend(static_tracks)
#
# =============================================================================
def is_OutTrack(self):
if self.posState <= 1:
isout = True
else:
isout = False
return isout
def compute_distance(self):
pass
def move_start_fid(self):
pass
def move_end_fid(self):
pass

View File

@ -15,17 +15,9 @@ class frontTrack(Track):
def __init__(self, boxes, features, imgshape=(1024, 1280)):
super().__init__(boxes, features, imgshape)
'''Trajectory features of the 5 key points (center, top-left, top-right, bottom-left, bottom-right)'''
# self.compute_cornpts_feats()
self.CART_HIGH_THRESH1 = imgshape[1]/2.98
# if self.tid==10:
# print(f"ID: {self.tid}")
'''The y1/y2 static-interval values are indices along axis=0 of boxes, not frame indices'''
det_y1 = np.diff(boxes[:, 1], axis=0)
det_y2 = np.diff(boxes[:, 3], axis=0)
@ -78,69 +70,6 @@ class frontTrack(Track):
condt = all(self.boxes[:, 3] > self.imgshape[1]-20)
return condt
# def is_OutTrack(self):
# isout = False
# if self.posState <= 1:
# isout = True
# return isout
# =============================================================================
# def compute_static_fids(self, det_y, STATIC_THRESH = 8):
# '''
# For the front camera, y is usually taken as the box's y1 coordinate, and the product must be inside the cart.
# inputs:
# y: 1D array
# parameters:
# STATIC_THRESH: threshold below which the trajectory is considered static.
# outputs:
# (start, end) indices of the elements of y whose differences are below STATIC_THRESH:
# ranges = [(x1, y1),
# (x1, y1),
# ...]
# '''
# # print(f"The ID is: {self.tid}")
#
# # det_y = np.diff(y, axis=0)
# ranges, rangex = [], []
#
# static_indices = np.where(np.abs(det_y) < STATIC_THRESH)[0]
#
# if len(static_indices) == 0:
# rangex.append((0, len(det_y)))
# return ranges, rangex
#
# start_index = static_indices[0]
#
# for i in range(1, len(static_indices)):
# if static_indices[i] != static_indices[i-1] + 1:
# ranges.append((start_index, static_indices[i-1] + 1))
# start_index = static_indices[i]
# ranges.append((start_index, static_indices[-1] + 1))
#
# if len(ranges) == 0:
# rangex.append((0, len(det_y)))
# return ranges, rangex
#
# idx1, idx2 = ranges[0][0], ranges[-1][1]
#
# if idx1 != 0:
# rangex.append((0, idx1))
#
# the final stage of the trajectory is in motion
# for k in range(1, len(ranges)):
# index1 = ranges[k-1][1]
# index2 = ranges[k][0]
# rangex.append((index1, index2))
#
# if idx2 != len(det_y):
# rangex.append((idx2, len(det_y)))
#
# return ranges, rangex
#
# =============================================================================
def is_static(self):
@ -202,14 +131,6 @@ class frontTrack(Track):
return condt
def is_upward(self):
'''Determine whether the product has been taken out.'''
print(f"The ID is: {self.tid}")

View File

@ -16,9 +16,6 @@ import pandas as pd
from scipy.spatial.distance import cdist
from pathlib import Path
# ================= required so that ultralytics can be imported
import sys
sys.path.append(r"D:\DetectTracking")
@ -85,13 +82,13 @@ def save_subimgs(vts, file, TracksDict):
cv2.imwrite(str(imgdir) + f"/{tid}_{fid}_{bid}.png", img)
def have_tracked():
trackdict = r'./data/trackdicts_1'
trackdict = r'./data/trackdicts'
alltracks = []
k = 0
gt = Profile()
for filename in os.listdir(trackdict):
# filename = 'test_20240402-173935_6920152400975_back_174037372.pkl'
filename = '加购_91.pkl'
# filename = '加购_91.pkl'
file, ext = os.path.splitext(filename)
filepath = os.path.join(trackdict, filename)
@ -123,9 +120,9 @@ def have_tracked():
k += 1
if k==1:
break
# k += 1
# if k==1:
# break
if len(alltracks):
drawFeatures(alltracks, save_dir)

223
tracking/test_val.py Normal file
View File

@ -0,0 +1,223 @@
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 14:03:03 2024
@author: ym
"""
import os
import cv2
import numpy as np
from pathlib import Path
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.plotting import Annotator, colors
from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load
from tracking.trackers import BOTSORT, BYTETracker
from tracking.dotrack.dotracks_back import doBackTracks
from tracking.dotrack.dotracks_front import doFrontTracks
from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
W, H = 1024, 1280
Mode = 'front' #'back'
def read_data_file(datapath):
with open(datapath, 'r') as file:
lines = file.readlines()
Videos = []
FrameBoxes, FrameFeats = [], []
boxes, feats = [], []
bboxes, ffeats = [], []
timestamp = []
t1 = None
for line in lines:
if line.find('CameraId') >= 0:
t = int(line.split(',')[1].split(':')[1])
timestamp.append(t)
if len(boxes) and len(feats):
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
boxes, feats = [], []
if t1 and t - t1 > 1e4:
Videos.append((FrameBoxes, FrameFeats))
FrameBoxes, FrameFeats = [], []
t1 = int(line.split(',')[1].split(':')[1])
if line.find('box') >= 0:
box = line.split(':', )[1].split(',')[:-1]
boxes.append(box)
bboxes.append(boxes)
if line.find('feat') >= 0:
feat = line.split(':', )[1].split(',')[:-1]
feats.append(feat)
ffeats.append(feat)
FrameBoxes.append(np.array(boxes, dtype = np.float32))
FrameFeats.append(np.array(feats, dtype = np.float32))
Videos.append((FrameBoxes, FrameFeats))
TimeStamp = np.array(timestamp, dtype = np.float32)
TimesDiff = np.diff(timestamp)
return Videos
def video2imgs(path):
vpath = os.path.join(path, "videos")
k = 0
have = False
for filename in os.listdir(vpath):
file, ext = os.path.splitext(filename)
imgdir = os.path.join(path, file)
if os.path.exists(imgdir):
continue
else:
os.mkdir(imgdir)
vfile = os.path.join(vpath, filename)
cap = cv2.VideoCapture(vfile)
i = 0
while True:
ret, frame = cap.read()
if not ret:
break
i += 1
imgp = os.path.join(imgdir, file+f"_{i}.png")
cv2.imwrite(imgp, frame)
print(filename+f": {i}")
cap.release()
k+=1
if k==1000:
break
def draw_boxes():
datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
VideosData = read_data_file(datapath)
bboxes = VideosData[0][0]
ffeats = VideosData[0][1]
videopath = r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4"
cap = cv2.VideoCapture(videopath)
i = 0
while True:
ret, frame = cap.read()
if not ret:
break
annotator = Annotator(frame.copy(), line_width=3)
boxes = bboxes[i]
for *xyxy, conf, cls in reversed(boxes):
label = f'{int(cls)}: {conf:.2f}'
color = colors(int(cls), True)
annotator.box_label(xyxy, label, color=color)
img = annotator.result()
imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1)
cv2.imwrite(imgpath, img)
print(f"Output: {i}")
i += 1
cap.release()
def init_tracker(tracker_yaml = None, bs=1):
"""
Initialize tracker for object tracking during prediction.
"""
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
return tracker
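# Usage sketch (the yaml path mirrors the one used in tracking() below):
#   tracker = init_tracker(r"./trackers/cfg/botsort.yaml", bs=1)
#   # then, once per frame: tracks = tracker.update(dets, feats)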
def tracking(bboxes, ffeats):
tracker_yaml = r"./trackers/cfg/botsort.yaml"
tracker = init_tracker(tracker_yaml)
track_boxes = np.empty((0, 9), dtype = np.float32)
features_dict = {}
'''==================== Run the tracking step ======================='''
for dets, feats in zip(bboxes, ffeats):
# needs to be re-sorted by frame_id
det_tracking = Boxes(dets).cpu().numpy()
tracks = tracker.update(det_tracking, feats)
if len(tracks):
track_boxes = np.concatenate([track_boxes, tracks], axis=0)
feat_dict = {int(x.idx): x.curr_feat for x in tracker.tracked_stracks if x.is_activated}
frame_id = tracks[0, 7]
features_dict.update({int(frame_id): feat_dict})
return track_boxes, features_dict  # the accumulated (N, 9) tracks, not just the last frame's detections
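# Each row of track_boxes follows the box layout used throughout dotrack:
# [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index].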
def main():
datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
VideosData = read_data_file(datapath)
bboxes = VideosData[0][0]
ffeats = VideosData[0][1]
bboxes, feats_dict = tracking(bboxes, ffeats)
if Mode == "front":
vts = doFrontTracks(bboxes, feats_dict)
vts.classify()
plt = plot_frameID_y2(vts)
plt.savefig('front_y2.png')
# plt.close()
else:
vts = doBackTracks(bboxes, feats_dict)
vts.classify()
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, filename)
if __name__ == "__main__":
filename = 'traj.png'
save_dir = Path('./result')
if not save_dir.exists():
save_dir.mkdir(parents=True, exist_ok=True)
main()

View File

@ -15,8 +15,11 @@ class Config:
embedding_size = 256
img_size = 224
ckpt_path = r"ckpts\resnet18_1220\best.pth"
ckpt_path = r"ckpts\best_resnet18_1887_0311.pth"
current_path = os.path.dirname(os.path.abspath(__file__))
model_path = os.path.join(current_path, r"ckpts\resnet18_1220\best.pth")
model_path = os.path.join(current_path, ckpt_path)
# model_path = "./trackers/reid/ckpts/resnet18_1220/best.pth"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
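# model_path now resolves relative to this config file; assuming the module
# lives at tracking/trackers/reid/ (as its import path suggests), that is
# .../tracking/trackers/reid/ckpts/best_resnet18_1887_0311.pth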

View File

@ -46,6 +46,8 @@ class ReIDInterface:
self.model = nn.DataParallel(model).to(self.device)
self.model = model
self.model.load_state_dict(torch.load(self.model_path, map_location=self.device))
self.model.eval()
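# .eval() matters here: it freezes BatchNorm running statistics and disables
# dropout, so the ReID embeddings are deterministic at inference time.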