Parse returned data; compatible with v5 and v10

This commit is contained in:
jiajie555
2025-04-18 14:41:53 +08:00
commit 010f5c445a
888 changed files with 93632 additions and 0 deletions

Binary file not shown.

Binary file not shown.

realtime/event_time_specify.py Normal file

@@ -0,0 +1,315 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 10 11:01:39 2024
@author: ym
"""
import os
import numpy as np
# from matplotlib.pylab import mpl
# mpl.use('Qt5Agg')
import matplotlib.pyplot as plt
import sys
sys.path.append(r"D:\DetectTracking")
from move_detect import MoveDetect
# from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output, read_weight_timeConsuming
from tracking.utils.read_data import read_weight_timeConsuming
def str_to_float_arr(s):
    # Remove a trailing comma if present, e.g. "1.0,2.0," -> "1.0,2.0"
    if s.endswith(','):
        s = s[:-1]
    # Split on commas and convert each element to float
    float_array = [float(x) for x in s.split(",")]
    return float_array
def find_samebox_in_array(arr, target):
for i, st in enumerate(arr):
if st[:4] == target[:4]:
return i
return -1
def array2frame(bboxes):
frameID = np.sort(np.unique(bboxes[:, 7].astype(int)))
# frame_ids = bboxes[:, frameID].astype(int)
fboxes, ttamps = [], []
for fid in frameID:
idx = np.where(bboxes[:, 7] == fid)[0]
box = bboxes[idx, :]
fboxes.append(box)
ttamps.append(int(box[0, 9]))
frameTstamp = np.concatenate((frameID[:,None], np.array(ttamps)[:,None]), axis=1)
return fboxes, frameTstamp
def extract_data_1(datapath):
    '''
    Each frame's output (including the last frame) must be followed by a blank
    line; that blank line serves as the frame-separator marker line (see the
    illustrative sketch _demo_frame_block_format() below).
    '''
trackerboxes = np.empty((0, 10), dtype=np.float64)
trackerfeats = np.empty((0, 256), dtype=np.float64)
boxes, feats, tboxes, tfeats = [], [], [], []
timestamp = -1
newframe = False
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
if line.find("CameraId")>=0:
newframe = True
timestamp, frameId = [int(ln.split(":")[1]) for ln in line.split(",")[1:]]
# boxes, feats, tboxes, tfeats = [], [], [], []
if line.find("box:") >= 0 and line.find("output_box:") < 0:
line = line.strip()
box = line[line.find("box:") + 4:].strip()
# if len(box)==6:
boxes.append(str_to_float_arr(box))
if line.find("feat:") >= 0:
line = line.strip()
feat = line[line.find("feat:") + 5:].strip()
# if len(feat)==256:
feats.append(str_to_float_arr(feat))
if line.find("output_box:") >= 0:
line = line.strip()
                # Ensure boxes and feats are in one-to-one correspondence, which in turn keeps tboxes and tfeats aligned
if len(boxes)==0 or len(boxes)!=len(feats):
continue
box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
box.append(timestamp)
index = find_samebox_in_array(boxes, box)
if index >= 0:
                    tboxes.append(box)  # the 'output_box:' prefix and surrounding whitespace were stripped above
# feat_f = str_to_float_arr(input_feats[index])
feat_f = feats[index]
norm_f = np.linalg.norm(feat_f)
feat_f = feat_f / norm_f
tfeats.append(feat_f)
            '''Separator (blank) line check'''
condt = line.find("timestamp")<0 and line.find("box:")<0 and line.find("feat:")<0
if condt and newframe:
if len(tboxes) and len(tfeats):
trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
timestamp = -1
boxes, feats, tboxes, tfeats = [], [], [], []
newframe = False
return trackerboxes, trackerfeats
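# A minimal, hypothetical sketch of the per-frame block layout extract_data_1
# expects: a "CameraId..." header line, "box:"/"feat:" lines, "output_box:"
# lines, then a blank separator line closing the frame. The sample values are
# placeholders, not real device output.
def _demo_frame_block_format():
    import tempfile, os as _os
    sample = (
        "CameraId:1, timestamp:1728900000000, frameId:1\n"
        "box: 10.0,20.0,110.0,220.0,0.9,0\n"
        "feat: " + ",".join(["0.0625"] * 256) + "\n"
        "output_box: 10.0,20.0,110.0,220.0,1,0.9,0,1,0\n"
        "\n"  # the blank separator line closes the frame
    )
    with tempfile.NamedTemporaryFile('w', suffix='.data', delete=False, encoding='utf-8') as f:
        f.write(sample)
    tboxes, tfeats = extract_data_1(f.name)
    _os.remove(f.name)
    print(tboxes.shape, tfeats.shape)  # expected: (1, 10) (1, 256)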
def devide_motion_state(tboxes, width):
    '''
    frameTstamp: marks motion-state changes of the shopping cart within the
    current camera's field of view.
    Hand state:
        0: no hand present
        1: hand present
        2: hand present and in a particular state (static)
    '''
fboxes, frameTstamp = array2frame(tboxes)
fnum = len(frameTstamp)
state = np.zeros((fnum, 2), dtype=np.int64)
frameState = np.concatenate((frameTstamp, state), axis = 1).astype(np.int64)
handState = np.concatenate((frameTstamp, state), axis = 1).astype(np.int64)
if fnum < width:
return frameState, handState
mtrackFid = {}
handFid = {}
    '''frameState marks the cart state judged from images; 0: static, 1: moving'''
    for idx in range(width, fnum+1):
        idx0 = idx-width
        lboxes = np.concatenate(fboxes[idx0:idx], axis = 0)
        md = MoveDetect(lboxes)
        md.classify()
        ## track.during is a 2-tuple: the start and end of the track within this
        ## time slice, expressed in units of boxes[:, 7] (frame IDs)
for track in md.track_motion:
f1, f2 = track.during
# if track.cls == 0: continue
idx1 = set(np.where(frameState[:,0] >= f1)[0])
idx2 = set(np.where(frameState[:,0] <= f2)[0])
idx3 = list(idx1.intersection(idx2))
if track.tid not in mtrackFid:
mtrackFid[track.tid] = set(idx3)
else:
mtrackFid[track.tid] = mtrackFid[track.tid].union(set(idx3))
frameState[idx-1, 3] = 1
frameState[idx3, 2] = 1
for track in md.hand_tracks:
f11, f22 = track.during
idx11 = set(np.where(handState[:,0] >= f11)[0])
idx22 = set(np.where(handState[:,0] <= f22)[0])
idx33 = list(idx11.intersection(idx22))
            '''Mark hand presence'''
handState[idx33, 2] = 1
            '''Future improvement: is_static could be replaced by a dedicated hand-state classifier'''
            if track.is_static(70) and len(idx33) > 1:
                '''Mark the hand as static'''
                handState[idx33, 2] = 2
    '''Output the state changes'''
for tid, fid in mtrackFid.items():
fstate = np.zeros((fnum, 1), dtype=np.int64)
fstate[list(fid), 0] = tid
frameState = np.concatenate((frameState, fstate), axis = 1).astype(np.int64)
return frameState, handState
def state_measure(periods, weights, hands, spath=None):
    '''Two states, static and motion, with transition instants (t0, t1):
        t0: static ----> motion
        t1: motion ----> static
    (a minimal edge-extraction sketch, _demo_state_transitions(), follows this function)
    '''
camtype_0, frstate_0 = periods[0]
camtype_1, frstate_1 = periods[1]
    '''Overall time span: tmin, tmax, during'''
tmin_w, tmax_w = np.min(weights[:, 0]), np.max(weights[:, 0])
tmin_0, tmax_0 = np.min(frstate_0[:, 1]), np.max(frstate_0[:, 1])
tmin_1, tmax_1 = np.min(frstate_1[:, 1]), np.max(frstate_1[:, 1])
tmin = min([tmin_w, tmin_0, tmin_1])
tmax = max([tmax_w, tmax_0, tmax_1])
# for ctype, tboxes, _ in tracker_boxes:
# t_min, t_max = np.min(tboxes[:, 9]), np.max(tboxes[:, 9])
# if t_min<tmin:
# tmin = t_min
# if t_max>tmax:
# tmax = t_max
# during = tmax - tmin
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
ax1.plot(weights[:, 0] - tmin, weights[:, 1], 'bo-', linewidth=1, markersize=4)
# ax1.set_xlim([0, during])
ax1.set_title('Weight (g)')
ax2.plot(frstate_0[:, 1] - tmin, frstate_0[:, 2], 'rx-', linewidth=1, markersize=8)
ax2.plot(frstate_0[:, 1] - tmin, frstate_0[:, 3], 'bo-', linewidth=1, markersize=4)
# ax2.set_xlim([0, during])
ax2.set_title(f'Camera: {int(camtype_0)}')
ax3.plot(frstate_1[:, 1] - tmin, frstate_1[:, 2], 'rx-', linewidth=1, markersize=8)
ax3.plot(frstate_1[:, 1] - tmin, frstate_1[:, 3], 'bo-', linewidth=1, markersize=4)
ax3.set_title(f'Camera: {int(camtype_1)}')
if spath:
plt.savefig(spath)
plt.show()
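# A minimal sketch of extracting the (t0, t1) transition instants described in
# state_measure's docstring from a binary state sequence. The timestamps and
# states below are hypothetical toy data.
def _demo_state_transitions():
    t = np.array([0, 100, 200, 300, 400, 500])   # timestamps (ms)
    s = np.array([0, 0, 1, 1, 0, 0])             # 0: static, 1: motion
    edges = np.diff(s)
    t0s = t[1:][edges == 1]    # static -> motion
    t1s = t[1:][edges == -1]   # motion -> static
    print([(int(a), int(b)) for a, b in zip(t0s, t1s)])  # -> [(200, 400)]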
def read_yolo_weight_data(eventdir):
filepaths = []
for filename in os.listdir(eventdir):
file, ext = os.path.splitext(filename)
if ext =='.data':
filepath = os.path.join(eventdir, filename)
filepaths.append(filepath)
if len(filepaths) != 5:
return
tracker_boxes = []
WeightDict, SensorDict, ProcessTimeDict = {}, {}, {}
for filepath in filepaths:
filename = os.path.basename(filepath)
if filename.find('_track.data')>0:
CamerType = filename.split('_')[0]
trackerboxes, trackerfeats = extract_data_1(filepath)
tracker_boxes.append((CamerType, trackerboxes, trackerfeats))
if filename.find('process.data')==0:
WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(filepath)
    '''==================== Weight-signal processing ===================='''
weights = [(float(t), w) for t, w in WeightDict.items()]
weights = np.array(weights)
return tracker_boxes, weights
def main():
eventdir = r"\\192.168.1.28\share\测试_202406\0819\images\20240817-192549-6940120c-634c-481b-97a6-65042729f86b_null"
tracker_boxes, weights = read_yolo_weight_data(eventdir)
    '''==================== Image-motion analysis ===================='''
win_width = 12
periods, hands = [], []
for ctype, tboxes, _ in tracker_boxes:
period, handState = devide_motion_state(tboxes, win_width)
periods.append((ctype, period))
hands.append((ctype, handState))
print('done!')
    '''=============== Fuse weight and image information ==================='''
state_measure(periods, weights, hands)
if __name__ == "__main__":
main()

realtime/full_realtime.py Normal file

@@ -0,0 +1,138 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 09:17:32 2024
@author: ym
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
sys.path.append(r"D:\DetectTracking")
from contrast.utils.event import ShoppingEvent
from tracking.utils.read_data import read_weight_sensor, extract_data_realtime, read_tracking_output_realtime
from tracking.utils.read_data import read_process
def time_std2stamp(custom_time):
date_part = custom_time.split('-')[0]
time_part = custom_time.split('-')[1]
ms_part = int(custom_time.split('-')[2])
standard_time = f"{date_part} {time_part[:2]}:{time_part[2:4]}:{time_part[4:]}"
dt = datetime.strptime(standard_time, "%Y%m%d %H:%M:%S")
timestamp = int(dt.timestamp() * 1000) + ms_part
return timestamp
def time_stamp2std(timestamp):
if isinstance(timestamp, float) or isinstance(timestamp, str):
timestamp = int(timestamp)
ms = timestamp%1000
times = timestamp//1000
std_time = datetime.fromtimestamp(times)
stdtime = std_time.strftime("%Y%m%d-%H%M%S") + '-' +str(ms)
return stdtime
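# A hedged round-trip example for the two converters above; the sample string
# is hypothetical. time_std2stamp expects "YYYYMMDD-HHMMSS-ms" and
# time_stamp2std emits the same layout, so the pair should round-trip
# (the absolute stamp value depends on the local timezone).
def _demo_time_roundtrip():
    custom = "20241224-162658-370"
    stamp = time_std2stamp(custom)
    print(stamp, time_stamp2std(stamp))  # the second value reproduces `custom`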
def get_timeduring_weight(procpath):
eventStart, eventEnd, weightValue = None, None, None
if os.path.isfile(procpath):
timeDict = read_process(procpath)
if "eventStart" in timeDict.keys():
eventStart = timeDict["eventStart"]
if "eventEnd" in timeDict.keys():
eventEnd = timeDict["eventEnd"]
if "weightValue" in timeDict.keys():
weightValue = timeDict["weightValue"]
return eventStart, eventEnd, weightValue
def event_devide(wpath):
    '''
    Split events based on the weight time series in _weight.data
    '''
# wpath = r'D:\全实时\source_data\2024122416\20241224-162658370_weight.data'
tpath, _ = os.path.split(wpath)
wsdata = read_weight_sensor(wpath)
times, weights = wsdata[:, 0], wsdata[:, 1]
Start, End = times[0], times[-1]
evtpaths, evtTimeWeight = [], []
for filename in os.listdir(tpath):
filelist = filename.split('_')
custom_time = filelist[0]
evtpath = os.path.join(tpath, filename)
if os.path.isdir(evtpath):
stamp = time_std2stamp(custom_time)
if stamp >= Start and stamp <= End:
evtpaths.append(evtpath)
for evtpath in evtpaths:
evtname = os.path.basename(evtpath)
event = ShoppingEvent(evtpath, stype = "realtime")
# try:
# event = ShoppingEvent(evtpath, stype = "realtime")
# except Exception as e:
# print(f"Error is: {e}", evtname)
        '''Read the event's start/end times and weight change'''
propath = os.path.join(evtpath, "process.data")
evtStart, evtEnd, wgtValue = get_timeduring_weight(propath)
evtTimeWeight.append((evtStart, evtEnd, wgtValue))
    '''Weight curve and event start/end intervals'''
fig, ax1 = plt.subplots(figsize=(16, 9), dpi=100)
ax1.plot(times-Start, weights, 'bo-', linewidth=1, markersize=3)
ax1.set_title('Weight (gram)')
    for t0, t1, w in evtTimeWeight:
        if t0 is None or t1 is None or w is None:
            continue
        # snap the event start to the nearest weight sample
        # (see the sketch _demo_nearest_weight_sample() below)
        index = int(np.argmin(np.abs(times - t0)))
        w0 = weights[index]
        w1 = w0 + w
        ax1.plot((t0 - Start, t0 - Start), (w0, w1), 'r*-', linewidth=1, markersize=6)
        ax1.plot((t1 - Start, t1 - Start), (w0, w1), 'r*-', linewidth=1, markersize=6)
        ax1.plot((t0 - Start, t1 - Start), (w1, w1), 'r*-', linewidth=1, markersize=6)
ax1.grid(True)
plt.show()
return plt
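# A small sketch of the alignment step used in event_devide above: snap an
# event's start time to the nearest weight sample before reading the baseline
# weight. All values below are hypothetical toy data.
def _demo_nearest_weight_sample():
    times = np.array([0, 100, 200, 300, 400])   # weight-sample timestamps (ms)
    weights = np.array([0, 0, 150, 150, 150])   # weight readings (g)
    t0 = 180                                    # event start time (ms)
    index = int(np.argmin(np.abs(times - t0)))  # nearest sample: index 2
    print(index, weights[index])                # -> 2 150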
def main():
tpath = r"D:\全实时\source_data\2024122416"
rltpath = r"D:\全实时\result"
for filename in os.listdir(tpath):
bname = filename.split("_")[0]
if filename.find("_weight.data") <= 0:
continue
wpath = os.path.join(tpath, filename)
plt = event_devide(wpath)
plt.savefig(os.path.join(rltpath, f'{bname}.png' )) # svg, png, pdf
print(filename)
print("Done!")
if __name__ == "__main__":
main()

realtime/intrude_detect.py Normal file

@@ -0,0 +1,420 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 8 10:07:17 2025
@author: wqg
"""
import csv
import os
import platform
import sys
import pickle
import cv2
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from typing import List, Tuple
from scipy.spatial.distance import cdist
from scipy.spatial import ConvexHull
from shapely.geometry import Point, Polygon
##################################################### for method: run_yrt()
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]
if str(ROOT) not in sys.path:
sys.path.insert(0, str(ROOT))
from track_reid import yolov10_resnet_tracker
from event_time_specify import devide_motion_state
def cross(o: Tuple[float, float], a: Tuple[float, float], b: Tuple[float, float]) -> float:
""" 计算向量 OA × OB 的叉积 """
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
def compute_convex_hull(points: List[Tuple[float, float]]) -> List[Tuple[float, float]]:
""" 使用 Andrew's Monotone Chain 算法求二维点集的凸包 """
points = sorted(set(points)) # 排序并去重
if len(points) <= 1:
return points
lower = []
for p in points:
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
lower.pop()
lower.append(p)
upper = []
for p in reversed(points):
while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
upper.pop()
upper.append(p)
    # Drop the duplicate joining points between the two chains
return lower[:-1] + upper[:-1]
def is_point_in_convex_hull(point: Tuple[float, float], hull: List[Tuple[float, float]]) -> bool:
""" 判断一个点是否在凸包(含边界)内 """
n = len(hull)
if n < 3:
        # For a single point or segment, test collinearity / on-segment membership directly
if n == 1:
return point == hull[0]
if n == 2:
a, b = hull
return abs(cross(a, b, point)) < 1e-10 and min(a[0], b[0]) <= point[0] <= max(a[0], b[0]) and min(a[1], b[1]) <= point[1] <= max(a[1], b[1])
return False
for i in range(n):
a = hull[i]
b = hull[(i + 1) % n]
        if cross(a, b, point) < -1e-10:  # the point must lie to the left of, or on, every edge
return False
return True
def plot_convex_hull(points: List[Tuple[float, float]], hull: List[Tuple[float, float]], test_points: List[Tuple[float, float]] = None):
x_all, y_all = zip(*points)
fig, ax = plt.subplots()
ax.set_xlim(0, 1024)
ax.set_ylim(1280, 0)
ax.plot(x_all, y_all, 'o', label='Points')
    # Closed outline of the hull
hull_loop = hull + [hull[0]]
hx, hy = zip(*hull_loop)
ax.plot(hx, hy, 'r-', linewidth=2, label='Convex Hull')
    # Plot the test points, if any
if test_points:
for pt in test_points:
color = 'green' if is_point_in_convex_hull(pt, hull) else 'black'
ax.plot(pt[0], pt[1], 's', color=color, markersize=8)
ax.text(pt[0] + 0.05, pt[1], f'{pt}', fontsize=9)
ax.legend()
ax.grid(True)
plt.title("Convex Hull Visualization")
plt.show()
def convex_scipy():
points = np.array([
[0, 0],
[2, 0],
[1, 1],
[2, 2],
[0, 2],
[1, 0.5]])
hull = ConvexHull(points)
    # Indices of the hull vertices
    print("Hull vertex indices: {}".format(hull.vertices))
    print("Hull vertex coordinates:")
    for i in hull.vertices:
        print(points[i])
    # Build a shapely Polygon from the hull vertices
    hull_points = points[hull.vertices]
    polygon = Polygon(hull_points)
    # Check whether a sample point lies inside the hull
    p = Point(1, 1)  # sample point
    print("Inside the hull:", polygon.contains(p))  # True or False
def test_convex():
    # Sample data
    sample_points = [(0, 0), (1, 1), (2, 2), (2, 0), (0, 2), (1, 0.5)]
    convex_hull = compute_convex_hull(sample_points)
    # Test points inside, outside, and on an edge of the hull
    test_point_inside = (1, 1)
    test_point_outside = (3, 3)
    test_point_on_edge = (1, 0)
    inside = is_point_in_convex_hull(test_point_inside, convex_hull)
    outside = is_point_in_convex_hull(test_point_outside, convex_hull)
    on_edge = is_point_in_convex_hull(test_point_on_edge, convex_hull)
    print(convex_hull, inside, outside, on_edge)
    # Visualize
    plot_convex_hull(sample_points, convex_hull, [test_point_inside, test_point_outside, test_point_on_edge])
def array2frame(tboxes):
"tboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]"
idx = np.where(tboxes[:, 6] != 0)[0]
bboxes = tboxes[idx, :]
frameID = np.sort(np.unique(bboxes[:, 7].astype(int)))
fboxes = []
for fid in frameID:
idx = np.where(bboxes[:, 7] == fid)[0]
box = bboxes[idx, :]
fboxes.append(box)
return fboxes
def convex_based(tboxes, width, TH=40):
fboxes = array2frame(tboxes)
fnum = len(fboxes)
fids = np.array([i+1 for i in range(fnum)])[:, np.newaxis]
state = np.zeros((fnum, 1), dtype=np.int64)
frameState = np.concatenate((fids, state), axis = 1).astype(np.int64)
if fnum < width:
return frameState
for idx1 in range(width, fnum+1):
idx0 = idx1 - width
idx = idx1 - width//2 - 1
        iboxes = fboxes[idx0:idx]  # the half-window of frames preceding the center frame
cboxes = fboxes[idx][:, 0:4]
cur_xy = np.zeros((len(cboxes), 2))
cur_xy[:, 0] = (fboxes[idx][:, 0]+fboxes[idx][:, 2])/2
cur_xy[:, 1] = (fboxes[idx][:, 1]+fboxes[idx][:, 3])/2
for i in range(width//2):
x1, y1, x2, y2 = iboxes[i][:, 0], iboxes[i][:, 1], iboxes[i][:, 2], iboxes[i][:, 3]
boxes = np.array([(x1, y1), (x1, y2), (x2, y1), (x2, y2)]).transpose(0, 2, 1).reshape(-1, 2)
box1 = [(x, y) for x, y in boxes]
convex_hull = compute_convex_hull(box1)
for pt in cur_xy:
inside = is_point_in_convex_hull(pt, convex_hull)
if not inside:
break
if not inside:
break
# Based on the distance between the four corners of the current frame boxes
# and adjacent frame boxes
iboxes = fboxes[idx0:idx] + fboxes[idx+1:idx1]
cboxes = fboxes[idx][:, 0:4]
cx1, cy1, cx2, cy2 = cboxes[:, 0], cboxes[:, 1], cboxes[:, 2], cboxes[:, 3]
cxy = np.array([(cx1, cy1), (cx1, cy2), (cx2, cy1), (cx2, cy2)]).transpose(0, 2, 1).reshape(-1, 2)
iiboxes = np.concatenate(iboxes, axis=0)
ix1, iy1, ix2, iy2 = iiboxes[:, 0], iiboxes[:, 1], iiboxes[:, 2], iiboxes[:, 3]
ixy = np.array([(ix1, iy1), (ix1, iy2), (ix2, iy1), (ix2, iy2)]).transpose(0, 2, 1).reshape(-1, 2)
Dist = cdist(cxy, ixy).round(2)
max_dist = np.max(np.min(Dist, axis=1))
        if max_dist > TH and not inside:
            # plot_convex_hull(boxes, convex_hull, [pt])
            frameState[idx, 1] = 1
    return frameState
def single_point(tboxes, width, TH=60):
"""width: window width, >=2"""
fboxes = array2frame(tboxes)
fnum = len(fboxes)
fids = np.array([i+1 for i in range(fnum)])[:, np.newaxis]
state = np.zeros((fnum, 1), dtype=np.int64)
frameState = np.concatenate((fids, state), axis = 1).astype(np.int64)
if fnum < width:
return frameState
for idx1 in range(width, fnum+1):
idx0 = idx1 - width
idx = idx1 - width//2 - 1
        iboxes = fboxes[idx0:idx] + fboxes[idx+1:idx1]  # window frames on both sides of the center
cboxes = fboxes[idx][:, 0:4]
cur_xy = np.zeros((len(cboxes), 2))
cur_xy[:, 0] = (fboxes[idx][:, 0]+fboxes[idx][:, 2])/2
cur_xy[:, 1] = (fboxes[idx][:, 1]+fboxes[idx][:, 3])/2
Dist = np.empty((len(cboxes), 0))
for i in range(width-1):
boxes = iboxes[i][:, 0:4]
box_xy = np.zeros((len(boxes), 2))
box_xy[:, 0] = (boxes[:, 0]+boxes[:, 2])/2
box_xy[:, 1] = (boxes[:, 1]+boxes[:, 3])/2
dist2 = cdist(cur_xy, box_xy).round(2)
Dist = np.concatenate((Dist, dist2), axis=1)
max_dist = np.max(np.min(Dist, axis=1))
if max_dist > TH:
frameState[idx, 1] = 1
return frameState
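# A hypothetical smoke test for the two sliding-window detectors above. One box
# per frame drifts right by 70 px/frame, so the window-center frame escapes both
# the neighbours' convex hulls and the corner/center distance thresholds and
# gets flagged. Synthetic toy data only.
def _demo_window_detectors():
    rows = []
    for fid in range(1, 21):
        x = 100.0 + 70.0 * fid  # one box per frame, drifting right
        rows.append([x, 200.0, x + 50.0, 260.0, 1, 0.9, 1, fid, 0])
    tboxes = np.array(rows, dtype=np.float64)
    print(convex_based(tboxes, width=5, TH=40))
    print(single_point(tboxes, width=5, TH=60))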
def intrude():
pkpath = Path("/home/wqg/dataset/small-goods/pkfiles")
savepath = Path("/home/wqg/dataset/small-goods/illustration_convex")
if not savepath.exists():
savepath.mkdir(parents=True, exist_ok=True)
err_trail, err_single, err_all = [], [], []
num = 0
for pth in pkpath.iterdir():
# item = r"69042386_20250407-145737_front_returnGood_b82d28427666_15_17700000001.pickle"
# pth = pkpath/item
with open(str(pth), 'rb') as f:
yrt = pickle.load(f)
evtname = pth.stem
bboxes = []
trackerboxes = np.empty((0, 10), dtype=np.float64)
for frameDict in yrt:
boxes = frameDict["bboxes"]
tboxes = frameDict["tboxes"]
tboxes = np.concatenate((tboxes, tboxes[:,7][:, None]), axis=1)
bboxes.append(boxes)
trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
        '''single-point-based intrusion detection'''
# wd =5
# fstate1 = single_point(trackerboxes, wd)
'''convex-based '''
width = 5
fstate = convex_based(trackerboxes, width, TH=60)
# fstate = np.zeros(fstate1.shape)
# fstate[:, 0] = fstate1[:, 0]
# fstate[:, 1] = fstate1[:, 1] * fstate2[:, 1]
        '''Trajectory-based intrusion detection.
        period columns: 0: fid, 1: timestamp(fid),
        2: tid expansion based on the sliding window,
        3: motion interval covered by the sliding window
        '''
win_width = 12
period, handState = devide_motion_state(trackerboxes, win_width)
num += 1
if np.all(period[:,2:4]==0):
err_trail.append(evtname)
if np.all(fstate[:,1]==0):
err_single.append(evtname)
if np.all(period[:,2:4]==0) and np.all(fstate[:,1]==0):
err_all.append(evtname)
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.plot(period[:, 1], period[:, 2], 'bo-', linewidth=1, markersize=4)
ax1.plot(period[:, 1], period[:, 3], 'rx-', linewidth=1, markersize=8)
ax2.plot(fstate[:, 0], fstate[:, 1], 'rx-', linewidth=1, markersize=8)
plt.savefig(os.path.join(str(savepath), f"{evtname}.png"))
plt.close()
# if num==1:
# break
rate_trail = 1 - len(err_trail)/num
rate_single = 1 - len(err_single)/num
rate_all = 1 - len(err_all)/num
print(f"rate_trail: {rate_trail}")
print(f"rate_single: {rate_single}")
print(f"rate_all: {rate_all}")
txtpath = savepath.parents[0] / "error.txt"
with open(str(txtpath), "w") as f:
f.write(f"rate_trail: {rate_trail}" + "\n")
f.write(f"rate_single: {rate_single}" + "\n")
f.write(f"rate_all: {rate_all}" + "\n")
f.write("\n" + "err_trail" + "\n")
for line in err_trail:
f.write(line + "\n")
f.write("\n" + "err_single" + "\n")
for line in err_single:
f.write(line + "\n")
f.write("\n" + "err_all" + "\n")
for line in err_all:
f.write(line + "\n")
print("Done!")
def run_yrt():
datapath = Path("/home/wqg/dataset/small-goods/videos/")
savepath = Path("/home/wqg/dataset/small-goods/result/")
pkpath = Path("/home/wqg/dataset/small-goods/pkfiles/")
if not savepath.exists():
savepath.mkdir(parents=True, exist_ok=True)
if not pkpath.exists():
pkpath.mkdir(parents=True, exist_ok=True)
optdict = {}
optdict["weights"] = ROOT / 'ckpts/best_v10s_width0375_1205.pt'
optdict["is_save_img"] = False
optdict["is_save_video"] = True
k = 0
for pth in datapath.iterdir():
item = "69042386_20250407-145819_back_returnGood_b82d28427666_15_17700000001.mp4"
pth = pth.parents[0] /item
optdict["source"] = pth
optdict["save_dir"] = savepath
# try:
yrtOut = yolov10_resnet_tracker(**optdict)
pkpath_ = pkpath / f"{Path(pth).stem}.pickle"
with open(str(pkpath_), 'wb') as f:
pickle.dump(yrtOut, f)
k += 1
if k==1:
break
# except Exception as e:
# print("abc")
if __name__ == '__main__':
# run_yrt()
intrude()
# test_convex()

realtime/time_devide.py Normal file

@@ -0,0 +1,544 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 17:37:07 2024
@author: ym
"""
# import csv
import os
# import platform
# import sys
from pathlib import Path
import glob
import numpy as np
import copy
import matplotlib.pyplot as plt
from collections import OrderedDict
from event_time_specify import devide_motion_state #, state_measure
import sys
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
from imgs_inference import run_yolo
from tracking.utils.read_data import read_weight_sensor
# IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
# VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def filesort(p):
    '''
    Sort image files by camera and timestamp; file names must first be
    normalized as "<prefix>_<camera>_<timestamp>.jpg".
    '''
files = []
files.extend(sorted(glob.glob(os.path.join(p, '*.jpg'))))
# images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
tamps_0, tamps_1 = [], []
files_0, files_1 = [], []
for file in files:
basename = os.path.basename(file)
# if basename.find('frameId')<0: continue
f, ext = os.path.splitext(basename)
_, camer, tamp = f.split('_')
if camer == '0':
tamps_0.append(int(tamp))
files_0.append(file)
if camer == '1':
tamps_1.append(int(tamp))
files_1.append(file)
idx0 = sorted(range(len(tamps_0)), key=lambda k: tamps_0[k])
files0 = [files_0[i] for i in idx0]
idx1 = sorted(range(len(tamps_1)), key=lambda k: tamps_1[k])
files1 = [files_1[i] for i in idx1]
return (files0, files1)
def rename(filePath, tmin):
    """
    Input: folder path
    Function: rename every matching file by appending its timestamp offset
    relative to tmin.
    """
    suffix = '.png'  # suffix filter selecting the files to rename
    for file in os.listdir(filePath):
        if file.endswith(suffix):
            name, ext = os.path.splitext(file)
            tamp = int(name.split('_')[2]) - tmin
            newname = name + f'_{int(tamp)}' + ext
            os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
def rerename(filePath=None):
    """
    Input: folder path
    Function: undo rename() by truncating file names back to their first five
    underscore-separated fields.
    """
    suffix = '.png'  # suffix filter selecting the files to rename
    for file in os.listdir(filePath):
        if file.endswith(suffix):
            name = file.split('.')[0]
            names = name.split('_')
            if len(names) >= 6:
                newname = "_".join(names[0:5]) + '.png'
                os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
def state_measure(periods, weights, hands, spath=None):
    '''
    Data-source types
        back camera: 0, front camera: 1, CV combined: 2, weight: 9
    frstate_0/1 columns:
        0: frame ID, 1: timestamp, 2: per-camera state 1, 3: per-camera state 2, 4:end: active-track flags
    time_stream = np.concatenate((tstream, weights)), columns:
        0: sequence index
        1: data type
        2: timestamp
        3: per-camera state 1 / weight value
        4: per-camera state 2 / weight (t0, t1)
        5: CV state / weight (t0', t1')
        6: combined data type
        7: combined state
    Per-camera state 1: motion interval derived from trajectory start/end points.
    Per-camera state 2: motion interval derived from sliding-window endpoints.
    Weight (t0, t1): the exact interval of the weight fluctuation, taken from the
        fluctuation's start/end rather than only from weight-settling times.
    Weight (t0', t1'): the fluctuation window extended for put-in/take-out so
        that it covers the whole shopping action.
    Scheme:
        OR the front- and back-camera states;
        AND the CV state with the weight state.
    (a toy sketch of the OR fusion, _demo_or_fusion(), follows this function)
    '''
    # BackType = 0             # back-camera data type
    # FrontType = 1            # front-camera data type
    CameraType = 2             # combined CV data type
    HandType = 3               # hand data type
    WeightType = 9             # weight data type
    WeightStableThresh = 7.5   # grams; maximum fluctuation of a stable weight state
    WeightWinWidth = 10        # number of weight samples; tied to the sampling interval,
                               # settling time 500 ms = WeightWinWidth * sampling interval
    CameraTimeInterval = 100   # ms; time-gap threshold when OR-ing front/back camera states
    InputFrontNum = 10         # weight samples to extend earlier for a weight increase (put-in)
    InputBackNum = 0           # weight samples to extend later for a weight increase (put-in)
    OutputFrontNum = 2         # weight samples to extend earlier for a weight decrease (take-out)
    OutputBackNum = 10         # weight samples to extend later for a weight decrease (take-out)
    CompTimeInterval = 150     # ms; time-gap threshold when AND-ing CV and weight states
    '''==================== 1.1 Assemble and sort the weight data ======================='''
nw = len(weights)
widx = np.array([k for k in range(0, nw)])[:, None]
wtype = WeightType * np.ones((nw, 1))
wstate = np.zeros((nw, 4))
weights = np.concatenate((widx, wtype, weights, wstate), axis=1).astype(np.int64)
weights[:, 6] = WeightType
weights = weights[np.argsort(weights[:, 2]), :]
    '''=================== 1.2 Weight-based state segmentation ========================='''
w_max = np.max(weights[:, 3])
# i0=0
for i2 in range(0, nw):
i1 = max(i2 - WeightWinWidth, 0)
wvalue = weights[i1:i2+1, 3]
wi2 = weights[i2, 3]
wmin = np.min(wvalue)
wmax = np.max(wvalue)
        '''Mark weight-fluctuation intervals, and record the index and value of the most recent stable weight'''
if wmax - wmin > WeightStableThresh:
weights[i2, 4] = w_max
if i2==0:
i0=0
wi0 = weights[i0, 3]
elif i2>0 and weights[i2-1, 4]==0:
i0 = copy.deepcopy(i2)
wi0 = weights[i0, 3]
if i2>0 and weights[i2-1, 4]!=0 and weights[i2, 4]==0:
            # The difference between the current stable weight and the previous stable weight decides put-in vs. take-out
if wi2-wi0 > WeightStableThresh:
i00 = max(i0 - InputFrontNum, 0)
i22 = min(i2 + InputBackNum, nw)
elif wi2-wi0 < -1*WeightStableThresh:
i00 = max(i0 - OutputFrontNum, 0)
i22 = min(i2 + OutputBackNum, nw)
else:
i00 = max(i0 - max(InputFrontNum, OutputFrontNum), 0)
i22 = min(i2 + max(InputBackNum, OutputBackNum), nw)
weights[i00:i22, 5] = w_max + 100
    '''===================== 2.1 Assemble and sort the CV data =========================='''
BackType, frstate_0 = periods[0]
FrontType, frstate_1 = periods[1]
n0, n1 = len(frstate_0), len(frstate_1)
idx0 = np.array([i for i in range(0, n0)], dtype=np.int64)[:, None]
idx1 = np.array([i for i in range(0, n1)], dtype=np.int64)[:, None]
ctype0 = BackType * np.ones((n0, 1), dtype=np.int64)
ctype1 = FrontType * np.ones((n1, 1), dtype=np.int64)
tstamp0 = frstate_0[:,1][:, None]
tstamp1 = frstate_1[:,1][:, None]
state0 = frstate_0[:,2][:, None]
state00 = frstate_0[:,3][:, None]
state1 = frstate_1[:,2][:, None]
state11 = frstate_1[:,3][:, None]
    '''tstream columns:
    0: sequence index, 1: camera type, 2: timestamp, 3: per-camera state 1,
    4: per-camera state 2, 5: combined CV state, 6: combined data type, 7: combined state
    '''
tstream0 = np.concatenate((idx0, ctype0, tstamp0, state0, state00), axis=1)
tstream1 = np.concatenate((idx1, ctype1, tstamp1, state1, state11), axis=1)
tstream = np.concatenate((tstream0, tstream1), axis=0)
tstream = np.concatenate((tstream, np.zeros((len(tstream), 3), dtype=np.int64)), axis=1)
tstream[:, 6] = CameraType
tstream = tstream[np.argsort(tstream[:, 2]), :]
    '''=============== 2.2 Combined CV state from front/back trajectory endpoints ============'''
for i in range(0, len(tstream)):
idx, ctype, stamp, state = tstream[i, :4]
if i==0:
tstream[i, 5] = state
if i>0:
j = i-1
idx0, ctype0, stamp0, state0 = tstream[j, :4]
while stamp-stamp0 < CameraTimeInterval and ctype == ctype0 and j>0:
j -= 1
idx0, ctype0, stamp0, state0 = tstream[j, :4]
            '''OR of the two cameras' states. Since front and back frames are not
            simultaneous, the key is choosing the alignment point across cameras:
            state is camera ctype's state at time i; state0 is the state of the other
            camera (ctype0 != ctype) at its time j closest to i.
            '''
if ctype != ctype0 and state0==1:
tstream[i, 5] = state0
else:
tstream[i, 5] = state
    '''================ 3.1 Assemble and sort the CV and weight data ======================'''
time_stream = np.concatenate((tstream, weights), axis=0, dtype=np.int64)
time_stream = time_stream[np.argsort(time_stream[:, 2]), :]
tmin = np.min(time_stream[:, 2])
time_stream[:, 2] = time_stream[:, 2] - tmin
    '''============== 3.2 Combined cart state from CV and weight ============'''
for i in range(0, len(time_stream)):
idx, _, stamp, value, _, state, ctype = time_stream[i, :7]
state = min(state, 1)
if i==0:
time_stream[i, 7] = state
if i>0:
j = i-1
idx0, _, stamp0, value0, _, state0, ctype0 = time_stream[j, :7]
while stamp-stamp0 < CompTimeInterval and ctype == ctype0 and j>0:
j -= 1
idx0, _, stamp0, value0, _, state0, ctype0 = time_stream[j, :7]
            '''AND of CV and weight. Since CV and weight samples are not
            simultaneous, the key is choosing the alignment point across sources:
            state is data type ctype's state at time i; state0 is the state of the
            other data type (ctype0 != ctype) at its time j closest to i.
            '''
if ctype != ctype0 and state !=0 and state0 !=0:
time_stream[i, 7] = 1
MotionSlice, motion_slice = [], []
t0 = time_stream[0, 7]
for i in range(1, len(time_stream)):
f0 = time_stream[i-1, 7]
f1 = time_stream[i, 7]
if f0==0 and f1==1:
t0 = time_stream[i, 2]
elif f0==1 and f1==0:
t1 = time_stream[i, 2]
if t1-t0>100: #ms
MotionSlice.append((t0+tmin, t1+tmin))
motion_slice.append((t0, t1))
else:
print(f"T0: {t0}, T1: {t1}")
    '''===================== 4. Assemble and sort the hand data =========================='''
BackType, hdstate_0 = hands[0]
FrontType, hdstate_1 = hands[1]
n0, n1 = len(hdstate_0), len(hdstate_1)
idx0 = np.array([i for i in range(0, n0)], dtype=np.int64)[:, None]
idx1 = np.array([i for i in range(0, n1)], dtype=np.int64)[:, None]
ctype0 = BackType * np.ones((n0, 1), dtype=np.int64)
ctype1 = FrontType * np.ones((n1, 1), dtype=np.int64)
hstamp0 = hdstate_0[:,1][:, None]
hstamp1 = hdstate_1[:,1][:, None]
state0 = hdstate_0[:,2][:, None]
state1 = hdstate_1[:,2][:, None]
    '''hstream columns:
    0: sequence index, 1: camera type, 2: timestamp, 3: per-camera hand state,
    4: combined hand state, 5: reserved, 6: combined data type, 7: combined state
    '''
hstream0 = np.concatenate((idx0, ctype0, hstamp0, state0), axis=1)
hstream1 = np.concatenate((idx1, ctype1, hstamp1, state1), axis=1)
hstream = np.concatenate((hstream0, hstream1), axis=0)
hstream = np.concatenate((hstream, np.zeros((len(hstream), 4), dtype=np.int64)), axis=1)
hstream[:, 6] = HandType
hstream = hstream[np.argsort(hstream[:, 2]), :]
for i in range(0, len(hstream)):
idx, ctype, stamp, state = hstream[i, :4]
if i==0:
hstream[i, 4] = state
if i>0:
j = i-1
idx0, ctype0, stamp0, state0 = hstream[j, :4]
while stamp-stamp0 < CameraTimeInterval and ctype == ctype0 and j>0:
j -= 1
idx0, ctype0, stamp0, state0 = hstream[j, :4]
            '''OR of the two cameras' states. Since front and back frames are not
            simultaneous, the key is choosing the alignment point across cameras:
            state is camera ctype's state at time i; state0 is the state of the other
            camera (ctype0 != ctype) at its time j closest to i.
            '''
if ctype != ctype0 and state0==2:
hstream[i, 4] = state0
elif ctype != ctype0 and state0==1:
hstream[i, 4] = state0
else:
hstream[i, 4] = state
    '''========================== 5. Plot the results ================================'''
frstate_0[:, 1] = frstate_0[:, 1]-tmin
frstate_1[:, 1] = frstate_1[:, 1]-tmin
tstream[:, 2] = tstream[:, 2]-tmin
fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1)
during = np.max(time_stream[:, 2])
ax1.plot(weights[:, 2]-tmin, weights[:, 3], 'bo-', linewidth=1, markersize=4)
ax1.plot(weights[:, 2]-tmin, weights[:, 4], 'mx-', linewidth=1, markersize=4)
ax1.plot(weights[:, 2]-tmin, weights[:, 5], 'gx-', linewidth=1, markersize=4)
ax1.set_xlim([0, during])
ax1.set_title('Weight (gram)')
ax2.plot(frstate_0[:, 1], frstate_0[:, 3], 'rx-', linewidth=1, markersize=8)
ax2.plot(frstate_0[:, 1], frstate_0[:, 2], 'bo-', linewidth=1, markersize=4)
ax2.set_xlim([0, during])
ax2.set_title('Back Camera')
ax3.plot(frstate_1[:, 1], frstate_1[:, 3], 'rx-', linewidth=1, markersize=8)
ax3.plot(frstate_1[:, 1], frstate_1[:, 2], 'bo-', linewidth=1, markersize=4)
ax3.set_xlim([0, during])
ax3.set_title('Front Camera')
ax4.plot(tstream[:, 2], tstream[:, 5], 'bx-', linewidth=1, markersize=4)
ax4.set_xlim([0, during])
ax4.set_title('CV State')
ax5.plot(time_stream[:, 2], time_stream[:, 7], 'gx-', linewidth=1, markersize=4)
ax5.set_xlim([0, during])
ax5.set_title('Cart State')
ax6.plot(hstream[:, 2]-tmin, hstream[:, 4], 'gx-', linewidth=1, markersize=4)
ax6.set_xlim([0, during])
ax6.set_title('Hand State')
    if spath:
        plt.savefig(spath)
    plt.show()
return tmin, MotionSlice
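# A hypothetical toy sketch of the OR-alignment described in state_measure's
# docstring: each sample is OR-ed with the most recent sample of the *other*
# camera that lies within `interval` ms. The data values below are made up.
def _demo_or_fusion(interval=100):
    # rows: (timestamp_ms, camera_type, state)
    stream = np.array([[0, 0, 0], [30, 1, 1], [80, 0, 0], [120, 1, 0], [150, 0, 1]])
    fused = stream[:, 2].copy()
    for i in range(1, len(stream)):
        t, c = stream[i, 0], stream[i, 1]
        for j in range(i - 1, -1, -1):
            if stream[j, 1] != c and t - stream[j, 0] < interval:
                fused[i] = max(fused[i], stream[j, 2])  # OR with the other camera
                break
    print(fused.tolist())  # -> [0, 1, 1, 0, 1]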
def splitevent(imgpath, MotionSlice):
suffix = '.png'
imgfiles = [f for f in os.listdir(imgpath) if f.endswith(suffix)]
timestamp = np.array([int(f.split('_')[2]) for f in imgfiles])
indexes = []
k = 0
for t0, t1 in MotionSlice:
idx0 = set(np.where(timestamp >= t0)[0])
idx1 = set(np.where(timestamp <= t1)[0])
idx2 = list(idx0.intersection(idx1))
files = [imgfiles[i] for i in idx2]
for filename in files:
file, ext = os.path.splitext(filename)
newname = file + f'_{k}.png'
os.rename(os.path.join(imgpath, filename), os.path.join(imgpath, newname))
k += 1
print("Done!")
def runyolo():
eventdirs = r"\\192.168.1.28\share\个人文件\wqg\realtime\eventdata"
savedir = r"\\192.168.1.28\share\个人文件\wqg\realtime\result"
k = 0
for edir in os.listdir(eventdirs):
edir = "1731316835560"
source = os.path.join(eventdirs, edir)
files = filesort(source)
for flist in files:
run_yolo(flist, savedir)
k += 1
if k==1:
break
def run_tracking(trackboxes, MotionSlice):
pass
def read_wsensor(filepath):
WeightDict = OrderedDict()
with open(filepath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for i, line in enumerate(clean_lines):
        line = line.strip()
if line.find(':') < 0: continue
# if line.find("Weight") >= 0:
# label = "Weight"
# continue
keyword = line.split(':')[0]
value = line.split(':')[1]
# if label == "Weight":
if len(keyword) and len(value):
vdata = [float(s) for s in value.split(',') if len(s)]
WeightDict[keyword] = vdata[-1]
weights = [(float(t), w) for t, w in WeightDict.items()]
weights = np.array(weights).astype(np.int64)
return weights
def show_seri():
datapath = r"\\192.168.1.28\share\个人文件\wqg\realtime\eventdata\1731316835560"
savedir = r"\\192.168.1.28\share\个人文件\wqg\realtime\1"
imgdir = datapath.split('\\')[-2] + "_" + datapath.split('\\')[-1]
imgpath = os.path.join(savedir, imgdir)
eventname = Path(datapath).stem
datafiles = sorted(glob.glob(os.path.join(datapath, '*.npy')))
periods, trackboxes, hands = [], [], []
win_width = 12
for npypath in datafiles:
CameraType = Path(npypath).stem.split('_')[-1]
tkboxes = np.load(npypath)
trackboxes.append((CameraType, tkboxes))
period, handState = devide_motion_state(tkboxes, win_width)
periods.append((int(CameraType), period))
hands.append((int(CameraType), handState))
    '''=============== Read the weight-sensor data ==================='''
    sensorfile = os.path.join(datapath, 'sensor.txt')
    weights = read_wsensor(sensorfile)
# weights = [(float(t), w) for t, w in WeightDict.items()]
# weights = np.array(weights)
    '''=============== Fuse weight and image information ==================='''
spath = os.path.join(savedir, f"{eventname}.png" )
tmin, MotionSlice = state_measure(periods, weights, hands, spath)
    # Used on the first run to rename the image files
# rerename(imgpath)
# rename(imgpath, tmin)
# splitevent(imgpath, MotionSlice)
def main():
# runyolo()
show_seri()
if __name__ == '__main__':
main()
# imgpaths = r"\\192.168.1.28\share\realtime\result\eventdata_1728978106733"
# rerename(imgpaths)

realtime/tracker_test.py Normal file

@@ -0,0 +1,281 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 2 14:15:57 2025
@author: ym
"""
import numpy as np
import cv2
import os
from pathlib import Path
import sys
sys.path.append(r"D:\DetectTracking")
# from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
from tracking.utils.plotting import Annotator, colors
from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load, boxes_add_fid
from tracking.trackers import BOTSORT, BYTETracker
from tracking.utils.showtrack import drawtracks
from hands.hand_inference import hand_pose
from tracking.utils.read_data import read_weight_sensor, extract_data_realtime, read_tracking_output_realtime
from contrast.feat_extract.config import config as conf
from contrast.feat_extract.inference import FeatsInterface
from tracking.utils.drawtracks import drawTrack
ReIDEncoder = FeatsInterface(conf)
W, H = 1024, 1280
Mode = 'front' #'back'
ImgFormat = ['.jpg', '.jpeg', '.png', '.bmp']
'''Call tracking() to obtain each target's trajectory with the local tracking
algorithm; this allows comparing the local tracker against the on-device tracker.'''
def init_tracker(tracker_yaml = None, bs=1):
"""
Initialize tracker for object tracking during prediction.
"""
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
return tracker
def init_trackers(tracker_yaml = None, bs=1):
"""
Initialize trackers for object tracking during prediction.
"""
# tracker_yaml = r"./tracking/trackers/cfg/botsort.yaml"
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
trackers = []
for _ in range(bs):
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
trackers.append(tracker)
return trackers
def draw_box(img, tracks):
annotator = Annotator(img.copy(), line_width=2)
# for *xyxy, conf, cls in reversed(tracks):
# name = f'{int(cls)} {conf:.2f}'
# color = colors(int(cls), True)
# annotator.box_label(xyxy, name, color=color)
for *xyxy, id, conf, cls, fid, bid in reversed(tracks):
name = f'ID:{int(id)} {int(cls)} {conf:.2f}'
color = colors(int(cls), True)
annotator.box_label(xyxy, name, color=color)
im0 = annotator.result()
return im0
def tracking(bboxes, ffeats):
tracker_yaml = "./tracking/trackers/cfg/botsort.yaml"
tracker = init_tracker(tracker_yaml)
TrackBoxes = np.empty((0, 9), dtype = np.float32)
TracksDict = {}
frmIds = []
    '''========================== Run tracking ============================='''
    # dets and feats must remain strictly aligned
k=0
for dets, feats in zip(bboxes, ffeats):
frmIds.append(np.unique(dets[:, 6]).astype(np.int64)[0])
boxes = dets[:, :6]
det_tracking = Boxes(boxes).cpu().numpy()
tracks, outfeats = tracker.update(det_tracking, features=feats)
        '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
                     0   1   2   3     4        5     6       7           8
        frame_index may also be replaced by the video frame ID; box_index stays unchanged
        '''
k += 1
imgpath = r"D:\全实时\202502\tracker\Yolos_Tracking\tracker\1_1740891284792\1_1740891284792_{}.png".format(int(k))
img = cv2.imread(imgpath)
im0 = draw_box(img, tracks)
savepath = r"D:\全实时\202502\tracker\Yolos_Tracking\tracker\1_1740891284792\b\1_1740891284792_{}_b.png".format(k)
cv2.imwrite(savepath, im0)
if len(tracks):
TrackBoxes = np.concatenate([TrackBoxes, tracks], axis=0)
# =============================================================================
# FeatDict = {}
# for track in tracks:
# tid = int(track[8])
# FeatDict.update({tid: feats[tid, :]})
#
# frameID = tracks[0, 7]
#
# # print(f"frameID: {int(frameID)}")
# assert len(tracks) == len(FeatDict), f"Please check the func: tracker.update() at frameID({int(frameID)})"
#
# TracksDict[f"frame_{int(frameID)}"] = {"feats":FeatDict}
# =============================================================================
return TrackBoxes, TracksDict
def dotrack():
datapath = r"D:\全实时\202502\tracker\1_tracker_in.data"
bboxes, ffeats = extract_data_realtime(datapath)
trackerboxes, tracker_feat_dict = tracking(bboxes, ffeats)
print("done!")
# def plotbox():
# fpath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\V12\2025-3-3\20250303-103833-338_6928804010091_6928804010091\1_tracking_output.data"
# imgpath = r"D:\全实时\202502\result\Yolos_Tracking\20250303-103833-338_6928804010091_6928804010091\1_1740969517953"
# trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(fpath)
# for *xyxy, id, conf, cls, fid, bid in tracking_outboxes[0]:
# imgname = f"1_1740969517953_{int(fid)}.png"
# img_path = os.path.join(imgpath, imgname)
# img = cv2.imread(img_path)
# annotator = Annotator(img.copy(), line_width=2)
# name = f'ID:{int(id)} {int(cls)} {conf:.2f}'
# color = colors(int(cls), True)
# annotator.box_label(xyxy, name, color=color)
# im0 = annotator.result()
# cv2.imwrite(os.path.join(imgpath, f"1_1740969517953_{int(fid)}_.png"), im0)
# print(f"1_1740969676295_{int(fid)}_.png")
# print("done")
def video2imgs(videopath):
cap = cv2.VideoCapture(str(videopath))
k = 0
while True:
ret, frame = cap.read()
if frame is None:
break
k += 1
imgpath = videopath.parent / f"{videopath.stem}_{k}.png"
cv2.imwrite(str(imgpath), frame)
def extract_evtimgs(evtpath):
vidpaths = [v for v in evtpath.iterdir() if v.suffix == '.mp4']
for vidpath in vidpaths:
video2imgs(vidpath)
stamps = [name.stem.split('_')[1] for name in vidpaths]
    if len(set(stamps)) == 1:
return stamps[0]
return None
def draw_tracking_boxes(evtpath, stamp):
for datapath in evtpath.iterdir():
if datapath.name.find('_tracking_output.data')<=0:
continue
camera = datapath.stem.split('_')[0]
trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(str(datapath))
        ## This block reads the trajectory data first, then loads the corresponding image by frame ID
for *xyxy, id, conf, cls, fid, bid in tracking_outboxes[0]:
imgpath = evtpath / f"{camera}_{stamp}_{int(fid)}.png"
img = cv2.imread(str(imgpath))
annotator = Annotator(img.copy(), line_width=2)
name = f'ID:{int(id)} {int(cls)} {conf:.2f}'
color = colors(int(cls), True)
annotator.box_label(xyxy, name, color=color)
im0 = annotator.result()
            cv2.imwrite(str(imgpath), im0)
print(datapath.name)
def draw_traj(evtpath):
for datapath in evtpath.iterdir():
if datapath.name.find('_tracking_output.data')<=0:
continue
fname = datapath.name
trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(datapath)
CamerType = fname.split('_')[0]
if CamerType == '1':
edgeline = cv2.imread("./CartTemp/board_ftmp_line.png")
if CamerType == '0':
edgeline = cv2.imread("./CartTemp/edgeline.png")
edgeline = drawTrack(tracking_outboxes, edgeline)
imgpath = datapath.parent / f"{datapath.stem}.png"
cv2.imwrite(str(imgpath), edgeline)
def main():
path = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\V12\2025-3-3\20250303-104225-381_6920459958674"
evtpaths = [p for p in Path(path).iterdir() if p.is_dir()]
for evtpath in evtpaths:
        # 1. Extract frames from the event's front/back camera videos
stamp = extract_evtimgs(evtpath)
        # 2. Draw boxes on the images from the trajectories in 0/1_tracking_output.data
draw_tracking_boxes(evtpath, stamp)
        # 3. Draw the trajectories from 0/1_tracking_output.data on the edgeline image
draw_traj(evtpath)
if __name__ == '__main__':
# dotrack()
# plotbox()
vpath = r"D:\datasets\ym\VID_20250307_105606"
extract_evtimgs(Path(vpath))