Added single-frame intrusion detection and YOLOv10 support
This commit is contained in:
BIN  __pycache__/move_detect.cpython-312.pyc  Normal file
Binary file not shown.
BIN  __pycache__/pipeline_01.cpython-312.pyc  Normal file
Binary file not shown.
@@ -19,25 +19,6 @@ from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
from utils.getsource import get_image_pairs, get_video_pairs
from tracking.utils.read_data import read_similar


class CameraEvent_:
    def __init__(self):
        self.cameraType = '',   # "front", "back"
        self.videoPath = '',
        self.imagePaths = [],
        self.yoloResnetTracker =[],
        self.tracking = None,


class ShoppingEvent_:
    def __init__(self):
        self.eventPath = ''
        self.eventName = ''
        self.barcode = ''
        self.eventType = '',    # "input", "output", "other"
        self.frontCamera = None
        self.backCamera = None
        self.one2n = []


def save_subimgs(imgdict, boxes, spath, ctype, featdict = None):
    '''
@@ -79,159 +60,49 @@ def save_subimgs_1(imgdict, boxes, spath, ctype, simidict = None):

    cv2.imwrite(imgpath, img)


def show_result(event_tracks, yrtDict, savepath_pipe):
    '''Save the motion-trajectory sub-images output by tracking, and record their similarity.'''

    savepath_pipe_subimgs = savepath_pipe / Path("subimgs")
    if not savepath_pipe_subimgs.exists():
        savepath_pipe_subimgs.mkdir(parents=True, exist_ok=True)

    for CamerType, vts in event_tracks:
        if len(vts.tracks)==0: continue
        if CamerType == 'front':
            # yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]
            yolos = yrtDict["frontyrt"]
            ctype = 1
        if CamerType == 'back':
            # yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]
            yolos = yrtDict["backyrt"]
            ctype = 0

        imgdict, featdict, simidict = {}, {}, {}
        for y in yolos:
            imgdict.update(y["imgs"])
            featdict.update(y["feats"])
            simidict.update(y["featsimi"])

        for track in vts.Residual:
            if isinstance(track, np.ndarray):
                save_subimgs(imgdict, track, savepath_pipe_subimgs, ctype, featdict)
            else:
                save_subimgs(imgdict, track.slt_boxes, savepath_pipe_subimgs, ctype, featdict)

    '''(3) Trajectory display and saving'''
    illus = [None, None]
    for CamerType, vts in event_tracks:
        if len(vts.tracks)==0: continue

        if CamerType == 'front':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipe, CamerType, draw5p=True)
            illus[0] = img_tracking

            plt = plot_frameID_y2(vts)
            plt.savefig(os.path.join(savepath_pipe, "front_y2.png"))

        if CamerType == 'back':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipe, CamerType, draw5p=True)
            illus[1] = img_tracking

    illus = [im for im in illus if im is not None]
    if len(illus):
        img_cat = np.concatenate(illus, axis = 1)
        if len(illus)==2:
            H, W = img_cat.shape[:2]
            cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)

        trajpath = os.path.join(savepath_pipe, "trajectory.png")
        cv2.imwrite(trajpath, img_cat)
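# A minimal sketch of the inputs show_result() expects, inferred from its body above;
# the names and values here are illustrative placeholders, not output of a real run:
def _show_result_inputs_sketch():
    yrt_frame = {"imgs": {}, "feats": {}, "featsimi": {}}        # one per-frame dict from the tracker
    yrtDict = {"frontyrt": [yrt_frame], "backyrt": [yrt_frame]}
    event_tracks = []   # list of (CamerType, vts) pairs, CamerType in {"front", "back"}
    return yrtDict, event_tracks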
def pipeline(eventpath,
def pipeline(
        eventpath,
        savepath,
        SourceType,
        weights,
        DataType = "raw",   # raw, pkl: images or videos, pkl, pickle file
        YoloVersion="V5",
        savepath = None,
        saveimages = True
        YoloVersion="V5"
    ):
    '''
    eventpath: storage path of a single event

    '''
    optdict = {}
    optdict["weights"] = weights

    if SourceType == "video":
        vpaths = get_video_pairs(eventpath)
    elif SourceType == "image":
        vpaths = get_image_pairs(eventpath)
    event_tracks = []

    ## Build the shopping-event dictionary
    evtname = Path(eventpath).stem
    barcode = evtname.split('_')[-1] if len(evtname.split('_'))>=2 \
                                    and len(evtname.split('_')[-1])>=8 \
                                    and evtname.split('_')[-1].isdigit() else ''

    '''Event result folders: savepath_pipe, savepath_pkl'''
    '''Event result folders'''
    if not savepath:
        savepath = Path(__file__).resolve().parents[0] / "events_result"
    savepath_pipe = Path(savepath) / Path("yolos_tracking") / evtname

    savepath_pipeline = Path(savepath) / Path("Yolos_Tracking") / evtname


    savepath_pkl = Path(savepath) / "shopping_pkl"
    if not savepath_pkl.exists():
        savepath_pkl.mkdir(parents=True, exist_ok=True)
    pklpath = Path(savepath_pkl) / Path(str(evtname)+".pickle")
    """Save path of the ShoppingDict pickle file"""
    savepath_spdict = Path(savepath) / "ShoppingDict_pkfile"
    if not savepath_spdict.exists():
        savepath_spdict.mkdir(parents=True, exist_ok=True)
    pf_path = Path(savepath_spdict) / Path(str(evtname)+".pickle")
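    # Hedged sketch of the event-name parsing above; the sample name follows the
    # "<timestamp>_<barcode>" pattern seen elsewhere in this repo and is only illustrative:
    def _barcode_parse_sketch():
        evtname = "20250310-175352-741_6902265587712"
        parts = evtname.split('_')
        barcode = parts[-1] if len(parts) >= 2 and len(parts[-1]) >= 8 and parts[-1].isdigit() else ''
        return barcode   # "6902265587712"; a name without the digit suffix yields ''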
    yrtDict = {}

    yrt_out = []
    if DataType == "raw":
        ### Do not re-run events that have already been through yolo-resnet-tracker
        # if pklpath.exists():
        # if pf_path.exists():
        #     print(f"Pickle file have saved: {evtname}.pickle")
        #     return

        if SourceType == "video":
            vpaths = get_video_pairs(eventpath)
        elif SourceType == "image":
            vpaths = get_image_pairs(eventpath)

        for vpath in vpaths:
            '''================= 2. Event result folders ================='''

            if isinstance(vpath, list):
                savepath_pipe_imgs = savepath_pipe / Path("images")
            else:
                savepath_pipe_imgs = savepath_pipe / Path(str(Path(vpath).stem))

            if not savepath_pipe_imgs.exists():
                savepath_pipe_imgs.mkdir(parents=True, exist_ok=True)

            optdict = {}
            optdict["weights"] = weights
            optdict["source"] = vpath
            optdict["save_dir"] = savepath_pipe_imgs
            optdict["is_save_img"] = saveimages
            optdict["is_save_video"] = True

            if YoloVersion == "V5":
                yrtOut = yolo_resnet_tracker(**optdict)
            elif YoloVersion == "V10":
                yrtOut = yolov10_resnet_tracker(**optdict)

            yrt_out.append((vpath, yrtOut))

    elif DataType == "pkl":
        pass

    else:
        return
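    # Hedged sketch of what yrt_out holds after the branch above; the path string is a
    # placeholder and the frame dict lists only the keys consumed later in this file:
    #   yrt_out = [("<vpath>", [{"imgs": {...}, "feats": {...}, "featsimi": {...}}, ...]), ...]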

    '''====================== Build the ShoppingDict module ======================='''
    ShoppingDict = {"eventPath": eventpath,
                    "eventName": evtname,
@@ -241,13 +112,16 @@ def pipeline(eventpath,
                    "backCamera": {},
                    "one2n": []   #
                    }
    yrtDict = {}

    procpath = Path(eventpath).joinpath('process.data')
    if procpath.is_file():
        SimiDict = read_similar(procpath)
        ShoppingDict["one2n"] = SimiDict['one2n']

    event_tracks = []
    for vpath, yrtOut in yrt_out:

    for vpath in vpaths:
        '''================= 1. Build the camera-event dictionary ================='''
        CameraEvent = {"cameraType": '',   # "front", "back"
                       "videoPath": '',
@@ -267,9 +141,33 @@ def pipeline(eventpath,
        if bname.split('_')[0] == "1" or bname.find('front')>=0:
            CameraEvent["cameraType"] = "front"

        '''================= 2. Event result folders ================='''
        if isinstance(vpath, list):
            savepath_pipeline_imgs = savepath_pipeline / Path("images")
        else:
            savepath_pipeline_imgs = savepath_pipeline / Path(str(Path(vpath).stem))

        if not savepath_pipeline_imgs.exists():
            savepath_pipeline_imgs.mkdir(parents=True, exist_ok=True)

        savepath_pipeline_subimgs = savepath_pipeline / Path("subimgs")
        if not savepath_pipeline_subimgs.exists():
            savepath_pipeline_subimgs.mkdir(parents=True, exist_ok=True)


        '''================= 3. Yolo + Resnet + Tracker ================='''
        optdict["source"] = vpath
        optdict["save_dir"] = savepath_pipeline_imgs
        optdict["is_save_img"] = True
        optdict["is_save_video"] = True

        if YoloVersion == "V5":
            yrtOut = yolo_resnet_tracker(**optdict)
        elif YoloVersion == "V10":
            yrtOut = yolov10_resnet_tracker(**optdict)

        '''Two saving modes: (1) save images, (2) no save images'''
        ### (1) save images
        yrtOut_save = []
        for frdict in yrtOut:
            fr_dict = {}
@@ -279,7 +177,6 @@ def pipeline(eventpath,
            yrtOut_save.append(fr_dict)
        CameraEvent["yoloResnetTracker"] = yrtOut_save

        ### (2) no save images
        # CameraEvent["yoloResnetTracker"] = yrtOut

        '''================= 4. tracking ================='''
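        # Hedged sketch of the nesting built above; the full dict literals are cut off at
        # the hunk boundaries, so the key lists mirror the removed helper classes:
        #   CameraEvent  mirrors CameraEvent_  (cameraType, videoPath, imagePaths, yoloResnetTracker, tracking)
        #   ShoppingDict mirrors ShoppingEvent_ (eventPath, eventName, barcode, eventType, frontCamera, backCamera, one2n)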
@@ -322,58 +219,108 @@ def pipeline(eventpath,
            yrtDict["frontyrt"] = yrtOut

    '''========================== Saving module ================================='''
    # Save the ShoppingDict
    with open(str(pklpath), 'wb') as f:
    '''(1) Save the ShoppingDict of the event'''
    with open(str(pf_path), 'wb') as f:
        pickle.dump(ShoppingDict, f)

    # Draw and save the trajectory plots
    show_result(event_tracks, yrtDict, savepath_pipe)
    '''(2) Save the motion-trajectory sub-images output by tracking, and record their similarity'''
    for CamerType, vts in event_tracks:
        if len(vts.tracks)==0: continue
        if CamerType == 'front':
            # yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]
            yolos = yrtDict["frontyrt"]
            ctype = 1
        if CamerType == 'back':
            # yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]
            yolos = yrtDict["backyrt"]
            ctype = 0

        imgdict, featdict, simidict = {}, {}, {}
        for y in yolos:
            imgdict.update(y["imgs"])
            featdict.update(y["feats"])
            simidict.update(y["featsimi"])

        for track in vts.Residual:
            if isinstance(track, np.ndarray):
                save_subimgs(imgdict, track, savepath_pipeline_subimgs, ctype, featdict)
            else:
                save_subimgs(imgdict, track.slt_boxes, savepath_pipeline_subimgs, ctype, featdict)

    '''(3) Trajectory display and saving'''
    illus = [None, None]
    for CamerType, vts in event_tracks:
        if len(vts.tracks)==0: continue

        if CamerType == 'front':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            illus[0] = img_tracking

            plt = plot_frameID_y2(vts)
            plt.savefig(os.path.join(savepath_pipeline, "front_y2.png"))

        if CamerType == 'back':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            illus[1] = img_tracking

    illus = [im for im in illus if im is not None]
    if len(illus):
        img_cat = np.concatenate(illus, axis = 1)
        if len(illus)==2:
            H, W = img_cat.shape[:2]
            cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)

        trajpath = os.path.join(savepath_pipeline, "trajectory.png")
        cv2.imwrite(trajpath, img_cat)
def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
                     DataType = "raw",            # raw, pkl
                     save_path = r"D:\work\result_pipeline",
                     kk=1,
                     source_type = "video",       # video, image,
                     save_path = r"D:\work\result_pipeline",
                     yolo_ver = "V10",            # V10, V5

                     weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
                     weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
                     saveimages = True
                     k=0
                     ):
    '''
    Run pipeline() while iterating over the event directory; each sub-folder is one event.
    '''
    parmDict = {}
    parmDict["DataType"] = DataType
    parmDict["savepath"] = save_path
    parmDict["SourceType"] = source_type

    parmDict["savepath"] = save_path
    parmDict["YoloVersion"] = yolo_ver
    if parmDict["YoloVersion"] == "V5":
        parmDict["weights"] = weight_yolo_v5
    elif parmDict["YoloVersion"] == "V10":
        parmDict["weights"] = weight_yolo_v10

    parmDict["saveimages"] = saveimages

    evtdir = Path(evtdir)
    errEvents = []
    k = 0
    for item in evtdir.iterdir():
        if item.is_dir():
            item = evtdir/Path("20250310-175352-741")
            parmDict["eventpath"] = item

            pipeline(**parmDict)
            # try:
            #     pipeline(**parmDict)
            # except Exception as e:
            #     errEvents.append(str(item))

            k+=1
            if kk is not None and k==kk:
            if k==1:
                break

    errfile = os.path.join(parmDict["savepath"], 'error_events.txt')
@@ -389,6 +336,23 @@ if __name__ == "__main__":
    # execute_pipeline(save_path=spath_v10, yolo_ver="V10")
    # execute_pipeline(save_path=spath_v5, yolo_ver="V5")

    datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
    savepath = r'/home/wqg/dataset/pipeline/contrast/single_event_V5'

    execute_pipeline(evtdir = datapath,
                     DataType = "raw",          # raw, pkl
                     kk=1,
                     source_type = "video",     # video, image,
                     save_path = savepath,
                     yolo_ver = "V10",          # V10, V5
                     weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
                     weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
                     saveimages = False
                     )
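# Hedged sketch of how downstream scripts in this commit consume the ShoppingDict /
# ShoppingEvent pickles written above (it mirrors the pickle.load usage in the contrast
# scripts; the argument is an illustrative path, not a fixed location):
import pickle
def _load_event_pickle(pf_path):
    with open(pf_path, 'rb') as f:
        return pickle.load(f)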
BIN  contrast/__pycache__/event_test.cpython-312.pyc  Normal file
Binary file not shown.
BIN  contrast/__pycache__/genfeats.cpython-312.pyc  Normal file
Binary file not shown.
BIN  contrast/__pycache__/one2n_contrast.cpython-312.pyc  Normal file
Binary file not shown.
@ -9,17 +9,19 @@ import cv2
|
||||
import json
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from pathlib import Path
|
||||
|
||||
from matplotlib import rcParams
|
||||
from matplotlib.font_manager import FontProperties
|
||||
from scipy.spatial.distance import cdist
|
||||
from utils.event import ShoppingEvent, save_data
|
||||
|
||||
|
||||
from utils.calsimi import calsimi_vs_stdfeat_new, get_topk_percent, cluster
|
||||
from utils.tools import get_evtList
|
||||
import pickle
|
||||
|
||||
rcParams['font.sans-serif'] = ['SimHei'] # 用黑体显示中文
|
||||
rcParams['axes.unicode_minus'] = False # 正确显示负号
|
||||
|
||||
|
||||
'''*********** USearch ***********'''
|
||||
def read_usearch():
|
||||
stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
|
||||
@ -35,13 +37,12 @@ def read_usearch():
|
||||
|
||||
return stdlib
|
||||
|
||||
def get_eventlist():
|
||||
def get_eventlist_errortxt(evtpaths):
|
||||
'''
|
||||
读取一次测试中的错误事件
|
||||
'''
|
||||
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
|
||||
text1 = "one2n_Error.txt"
|
||||
text2 = "one2SN_Error.txt"
|
||||
text1 = "one_2_Small_n_Error.txt"
|
||||
text2 = "one_2_Big_N_Error.txt"
|
||||
events = []
|
||||
text = (text1, text2)
|
||||
for txt in text:
|
||||
@ -54,15 +55,15 @@ def get_eventlist():
|
||||
fpath=os.path.join(evtpaths, line)
|
||||
events.append(fpath)
|
||||
|
||||
|
||||
|
||||
events = list(set(events))
|
||||
|
||||
return events
|
||||
|
||||
def single_event():
|
||||
|
||||
events = get_eventlist()
|
||||
|
||||
|
||||
def save_eventdata():
|
||||
evtpaths = r"/home/wqg/dataset/test_dataset/performence_dataset/"
|
||||
events = get_eventlist_errortxt(evtpaths)
|
||||
|
||||
'''定义当前事件存储地址及生成相应文件件'''
|
||||
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
|
||||
@ -74,121 +75,148 @@ def single_event():
|
||||
|
||||
|
||||
|
||||
def get_topk_percent(data, k):
|
||||
"""
|
||||
获取数据中最大的 k% 的元素
|
||||
"""
|
||||
# 将数据转换为 NumPy 数组
|
||||
if isinstance(data, list):
|
||||
data = np.array(data)
|
||||
# def get_topk_percent(data, k):
|
||||
# """
|
||||
# 获取数据中最大的 k% 的元素
|
||||
# """
|
||||
# # 将数据转换为 NumPy 数组
|
||||
# if isinstance(data, list):
|
||||
# data = np.array(data)
|
||||
|
||||
percentile = np.percentile(data, 100-k)
|
||||
top_k_percent = data[data >= percentile]
|
||||
# percentile = np.percentile(data, 100-k)
|
||||
# top_k_percent = data[data >= percentile]
|
||||
|
||||
return top_k_percent
|
||||
def cluster(data, thresh=0.15):
|
||||
# data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
|
||||
# data = np.array([0.1, 0.13, 0.2, 0.3])
|
||||
# data = np.array([0.1])
|
||||
# return top_k_percent
|
||||
# def cluster(data, thresh=0.15):
|
||||
# # data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
|
||||
# # data = np.array([0.1, 0.13, 0.2, 0.3])
|
||||
# # data = np.array([0.1])
|
||||
|
||||
if isinstance(data, list):
|
||||
data = np.array(data)
|
||||
# if isinstance(data, list):
|
||||
# data = np.array(data)
|
||||
|
||||
data1 = np.sort(data)
|
||||
cluter, Cluters, = [data1[0]], []
|
||||
for i in range(1, len(data1)):
|
||||
if data1[i] - data1[i-1]< thresh:
|
||||
cluter.append(data1[i])
|
||||
else:
|
||||
Cluters.append(cluter)
|
||||
cluter = [data1[i]]
|
||||
Cluters.append(cluter)
|
||||
# data1 = np.sort(data)
|
||||
# cluter, Cluters, = [data1[0]], []
|
||||
# for i in range(1, len(data1)):
|
||||
# if data1[i] - data1[i-1]< thresh:
|
||||
# cluter.append(data1[i])
|
||||
# else:
|
||||
# Cluters.append(cluter)
|
||||
# cluter = [data1[i]]
|
||||
# Cluters.append(cluter)
|
||||
|
||||
clt_center = []
|
||||
for clt in Cluters:
|
||||
## 是否应该在此处限制一个聚类中的最小轨迹样本数,应该将该因素放在轨迹分析中
|
||||
# if len(clt)>=3:
|
||||
# clt_center.append(np.mean(clt))
|
||||
clt_center.append(np.mean(clt))
|
||||
# clt_center = []
|
||||
# for clt in Cluters:
|
||||
# ## 是否应该在此处限制一个聚类中的最小轨迹样本数,应该将该因素放在轨迹分析中
|
||||
# # if len(clt)>=3:
|
||||
# # clt_center.append(np.mean(clt))
|
||||
# clt_center.append(np.mean(clt))
|
||||
|
||||
# print(clt_center)
|
||||
# # print(clt_center)
|
||||
|
||||
return clt_center
|
||||
# return clt_center
|
||||
|
||||
def calc_simil(event, stdfeat):
|
||||
'''事件与标准库的对比策略
|
||||
该比对策略是否可以拓展到事件与事件的比对?
|
||||
'''
|
||||
# def calsimi_vs_stdfeat_new(event, stdfeat):
|
||||
# '''事件与标准库的对比策略
|
||||
# 该比对策略是否可以拓展到事件与事件的比对?
|
||||
# '''
|
||||
|
||||
|
||||
def calsiml(feat1, feat2, topkp=75, cluth=0.15):
|
||||
'''轨迹样本和标准特征集样本相似度的选择策略'''
|
||||
matrix = 1 - cdist(feat1, feat2, 'cosine')
|
||||
simi_max = []
|
||||
for i in range(len(matrix)):
|
||||
sim = np.mean(get_topk_percent(matrix[i, :], topkp))
|
||||
simi_max.append(sim)
|
||||
cltc_max = cluster(simi_max, cluth)
|
||||
Simi = max(cltc_max)
|
||||
# def calsiml(feat1, feat2, topkp=75, cluth=0.15):
|
||||
# '''轨迹样本和标准特征集样本相似度的选择策略'''
|
||||
# matrix = 1 - cdist(feat1, feat2, 'cosine')
|
||||
# simi_max = []
|
||||
# for i in range(len(matrix)):
|
||||
# sim = np.mean(get_topk_percent(matrix[i, :], topkp))
|
||||
# simi_max.append(sim)
|
||||
# cltc_max = cluster(simi_max, cluth)
|
||||
# Simi = max(cltc_max)
|
||||
|
||||
## cltc_max为空属于编程考虑不周,应予以排查解决
|
||||
# if len(cltc_max):
|
||||
# Simi = max(cltc_max)
|
||||
# else:
|
||||
# Simi = 0 #不应该走到该处
|
||||
# ## cltc_max为空属于编程考虑不周,应予以排查解决
|
||||
# # if len(cltc_max):
|
||||
# # Simi = max(cltc_max)
|
||||
# # else:
|
||||
# # Simi = 0 #不应该走到该处
|
||||
|
||||
|
||||
return Simi
|
||||
# return Simi
|
||||
|
||||
|
||||
front_boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
|
||||
front_feats = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
for i in range(len(event.front_boxes)):
|
||||
front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
|
||||
front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
|
||||
# front_boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
|
||||
# front_feats = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
# for i in range(len(event.front_boxes)):
|
||||
# front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
|
||||
# front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
|
||||
|
||||
back_boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
|
||||
back_feats = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
for i in range(len(event.back_boxes)):
|
||||
back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
|
||||
back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
|
||||
# back_boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
|
||||
# back_feats = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
|
||||
# for i in range(len(event.back_boxes)):
|
||||
# back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
|
||||
# back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
|
||||
|
||||
if len(front_feats):
|
||||
front_simi = calsiml(front_feats, stdfeat)
|
||||
if len(back_feats):
|
||||
back_simi = calsiml(back_feats, stdfeat)
|
||||
# if len(front_feats):
|
||||
# front_simi = calsiml(front_feats, stdfeat)
|
||||
# if len(back_feats):
|
||||
# back_simi = calsiml(back_feats, stdfeat)
|
||||
|
||||
# '''前后摄相似度融合策略'''
|
||||
# if len(front_feats) and len(back_feats):
|
||||
# diff_simi = abs(front_simi - back_simi)
|
||||
# if diff_simi>0.15:
|
||||
# Similar = max([front_simi, back_simi])
|
||||
# else:
|
||||
# Similar = (front_simi+back_simi)/2
|
||||
# elif len(front_feats) and len(back_feats)==0:
|
||||
# Similar = front_simi
|
||||
# elif len(front_feats)==0 and len(back_feats):
|
||||
# Similar = back_simi
|
||||
# else:
|
||||
# Similar = None # 在event.front_feats和event.back_feats同时为空时
|
||||
|
||||
# return Similar
|
||||
|
||||
'''前后摄相似度融合策略'''
|
||||
if len(front_feats) and len(back_feats):
|
||||
diff_simi = abs(front_simi - back_simi)
|
||||
if diff_simi>0.15:
|
||||
Similar = max([front_simi, back_simi])
|
||||
else:
|
||||
Similar = (front_simi+back_simi)/2
|
||||
elif len(front_feats) and len(back_feats)==0:
|
||||
Similar = front_simi
|
||||
elif len(front_feats)==0 and len(back_feats):
|
||||
Similar = back_simi
|
||||
else:
|
||||
Similar = None # 在event.front_feats和event.back_feats同时为空时
|
||||
|
||||
return Similar
|
||||
|
||||
|
||||
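# A worked numeric example of the front/back fusion rule above (the values are made up):
#   front_simi = 0.80, back_simi = 0.60 -> diff 0.20 > 0.15, so Similar = max = 0.80
#   front_simi = 0.70, back_simi = 0.62 -> diff 0.08 <= 0.15, so Similar = mean = 0.66
#   only one camera has features        -> Similar takes that camera's value
#   neither camera has features         -> Similar = None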
def simi_matrix():
|
||||
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
|
||||
evtpaths = r"/home/wqg/dataset/pipeline/contrast/single_event_V10/evtobjs/"
|
||||
|
||||
stdlib = read_usearch()
|
||||
events = get_eventlist()
|
||||
for evtpath in events:
|
||||
evtname = os.path.basename(evtpath)
|
||||
_, barcode = evtname.split("_")
|
||||
stdfeatPath = r"/home/wqg/dataset/test_dataset/total_barcode/features_json/v11_barcode_0304/"
|
||||
resultPath = r"/home/wqg/dataset/performence_dataset/result/"
|
||||
|
||||
# 生成事件与相应标准特征集
|
||||
event = ShoppingEvent(evtpath)
|
||||
stdfeat = stdlib[barcode]
|
||||
evt_paths, bcdSet = get_evtList(evtpaths)
|
||||
|
||||
Similar = calc_simil(event, stdfeat)
|
||||
## read std features
|
||||
stdDict={}
|
||||
evtDict = {}
|
||||
for barcode in bcdSet:
|
||||
stdpath = os.path.join(stdfeatPath, f"{barcode}.json")
|
||||
if not os.path.isfile(stdpath):
|
||||
continue
|
||||
|
||||
with open(stdpath, 'r', encoding='utf-8') as f:
|
||||
stddata = json.load(f)
|
||||
feat = np.array(stddata["value"])
|
||||
stdDict[barcode] = feat
|
||||
|
||||
for evtpath in evt_paths:
|
||||
barcode = Path(evtpath).stem.split("_")[-1]
|
||||
|
||||
if barcode not in stdDict.keys():
|
||||
continue
|
||||
|
||||
# try:
|
||||
# with open(evtpath, 'rb') as f:
|
||||
# evtdata = pickle.load(f)
|
||||
# except Exception as e:
|
||||
# print(evtname)
|
||||
|
||||
with open(evtpath, 'rb') as f:
|
||||
event = pickle.load(f)
|
||||
|
||||
stdfeat = stdDict[barcode]
|
||||
|
||||
Similar = calsimi_vs_stdfeat_new(event, stdfeat)
|
||||
|
||||
# 构造 boxes 子图存储路径
|
||||
subimgpath = os.path.join(resultPath, f"{event.evtname}", "subimg")
|
||||
@ -217,9 +245,9 @@ def simi_matrix():
|
||||
evtfeat = np.concatenate((evtfeat, event.back_feats[i]), axis=0)
|
||||
imgpaths = event.back_imgpaths
|
||||
|
||||
assert len(boxes)==len(evtfeat), f"Please check the Event: {evtname}"
|
||||
assert len(boxes)==len(evtfeat), f"Please check the Event: {event.evtname}"
|
||||
if len(boxes)==0: continue
|
||||
print(evtname)
|
||||
print(event.evtname)
|
||||
|
||||
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
|
||||
simi_1d = matrix.flatten()
|
||||
@ -309,8 +337,8 @@ def simi_matrix():
|
||||
mean_diff = abs(mean_values[1]-mean_values[0])
|
||||
ax[0, 1].set_title(f"mean diff: {mean_diff:.3f}")
|
||||
if len(max_values)==2:
|
||||
max_values = abs(max_values[1]-max_values[0])
|
||||
ax[0, 2].set_title(f"max diff: {max_values:.3f}")
|
||||
max_diff = abs(max_values[1]-max_values[0])
|
||||
ax[0, 2].set_title(f"max diff: {max_diff:.3f}")
|
||||
try:
|
||||
fig.suptitle(f"Similar: {Similar:.3f}", fontsize=16)
|
||||
except Exception as e:
|
||||
@ -319,19 +347,14 @@ def simi_matrix():
|
||||
pltpath = os.path.join(subimgpath, f"hist_max_{kpercent}%_.png")
|
||||
plt.savefig(pltpath)
|
||||
|
||||
pltpath1 = os.path.join(histpath, f"{evtname}_.png")
|
||||
pltpath1 = os.path.join(histpath, f"{event.evtname}_.png")
|
||||
plt.savefig(pltpath1)
|
||||
|
||||
|
||||
plt.close()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
simi_matrix()
|
||||
|
||||
|
||||
|
Binary files not shown.
@@ -38,13 +38,13 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
    '''
    inputs:
        bcdpath: cleaned barcode sample images; if a barcode folder contains a 'base' sub-folder, only images under it are used
                 (default = r'\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_1771')
                 (default = r'\\\\192.168.1.28\\share\\已标注数据备份\\对比数据\\barcode\\barcode_1771')
        Function:
            generate and save a single-key dictionary {barcode: [imgpath1, imgpath2, ...]},
        savepath: storage path of the dictionary; file name format: barcode.pickle
    '''

    # savepath = r'\\192.168.1.28\share\测试_202406\contrast\std_barcodes'
    # savepath = r'\\\\192.168.1.28\\share\\测试_202406\\contrast\\std_barcodes'

    '''Read the list of barcodes in the dataset'''
    stdBarcodeList = []
@@ -6,57 +6,11 @@ Created on Wed Dec 18 11:49:01 2024
"""
import os
import pickle
import copy
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from utils.event import ShoppingEvent


def init_eventDict(sourcePath, eventDataPath, stype="data"):
    '''stype: str,
        'source': pickle files generated from videos or images
        'data': on-site runtime data read from data files
        "realtime": fully real-time data, read from data files

       sourcePath: event folder; two kinds of events are handled:
        (1) pickle files generated by the pipeline
        (2) directly captured event folders
    '''
    k, errEvents = 0, []
    for evtname in os.listdir(sourcePath):
        bname, ext = os.path.splitext(evtname)
        source_path = os.path.join(sourcePath, evtname)

        if stype=="source" and ext not in ['.pkl', '.pickle']: continue
        if stype=="data" and os.path.isfile(source_path): continue
        if stype=="realtime" and os.path.isfile(source_path): continue

        evt = bname.split('_')
        condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
        if not condt: continue

        pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
        if os.path.isfile(pickpath): continue

        # event = ShoppingEvent(source_path, stype)
        try:
            event = ShoppingEvent(source_path, stype)
            with open(pickpath, 'wb') as f:
                pickle.dump(event, f)
            print(evtname)
        except Exception as e:
            errEvents.append(source_path)
            print(f"Error: {evtname}, {e}")
        # k += 1
        # if k==1:
        #     break

    errfile = Path(eventDataPath).parent / 'error_events.txt'
    with open(str(errfile), 'a', encoding='utf-8') as f:
        for line in errEvents:
            f.write(line + '\n')
from utils.tools import init_eventDict

def read_eventdict(eventDataPath):
    evtDict = {}
@ -27,188 +27,24 @@ Created on Fri Aug 30 17:53:03 2024
|
||||
|
||||
"""
|
||||
import numpy as np
|
||||
import cv2
|
||||
import os
|
||||
import sys
|
||||
import random
|
||||
import pickle
|
||||
import json
|
||||
import random
|
||||
import copy
|
||||
import sys
|
||||
# import torch
|
||||
import time
|
||||
# import json
|
||||
|
||||
from pathlib import Path
|
||||
from scipy.spatial.distance import cdist
|
||||
import matplotlib.pyplot as plt
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
# from openpyxl import load_workbook, Workbook
|
||||
|
||||
# from config import config as conf
|
||||
# from model import resnet18 as resnet18
|
||||
# from feat_inference import inference_image
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar, read_deletedBarcode_file
|
||||
from tracking.utils.plotting import Annotator, colors
|
||||
from feat_extract.config import config as conf
|
||||
from feat_extract.inference import FeatsInterface
|
||||
from utils.event import ShoppingEvent, save_data
|
||||
from utils.calsimi import calsimi_vs_stdfeat, calsimi_vs_stdfeat_new
|
||||
from utils.tools import get_evtList, init_eventDict
|
||||
from utils.databits import data_precision_compare
|
||||
from genfeats import gen_bcd_features
|
||||
from event_test import calc_simil
|
||||
from one2n_contrast import init_eventDict
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def int8_to_ft16(arr_uint8, amin, amax):
|
||||
arr_ft16 = (arr_uint8 / 255 * (amax-amin) + amin).astype(np.float16)
|
||||
|
||||
return arr_ft16
|
||||
|
||||
def ft16_to_uint8(arr_ft16):
|
||||
# pickpath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32vsft16\6902265587712_ft16.pickle"
|
||||
|
||||
# with open(pickpath, 'rb') as f:
|
||||
# edict = pickle.load(f)
|
||||
|
||||
# arr_ft16 = edict['feats']
|
||||
|
||||
amin = np.min(arr_ft16)
|
||||
amax = np.max(arr_ft16)
|
||||
arr_ft255 = (arr_ft16 - amin) * 255 / (amax-amin)
|
||||
arr_uint8 = arr_ft255.astype(np.uint8)
|
||||
|
||||
arr_ft16_ = int8_to_ft16(arr_uint8, amin, amax)
|
||||
|
||||
arrDistNorm = np.linalg.norm(arr_ft16_ - arr_ft16) / arr_ft16_.size
|
||||
|
||||
return arr_uint8, arr_ft16_
|
||||
|
||||
|
||||
def data_precision_compare(stdfeat, evtfeat, evtMessage, save=True):
|
||||
evt, stdbcd, label = evtMessage
|
||||
rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
|
||||
|
||||
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]
|
||||
|
||||
|
||||
##================================================================= float16
|
||||
stdfeat_ft16 = stdfeat.astype(np.float16)
|
||||
evtfeat_ft16 = evtfeat.astype(np.float16)
|
||||
stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
|
||||
evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
|
||||
|
||||
|
||||
matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
|
||||
simi_mean_ft16 = np.mean(matrix_ft16)
|
||||
simi_max_ft16 = np.max(matrix_ft16)
|
||||
stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
|
||||
evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
|
||||
rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]
|
||||
|
||||
'''****************** uint8 is ok!!!!!! ******************'''
|
||||
##=================================================================== uint8
|
||||
# stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
|
||||
# evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
|
||||
|
||||
stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
|
||||
evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
|
||||
stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
|
||||
evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
|
||||
|
||||
absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
|
||||
|
||||
matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
|
||||
simi_mean_ft16_ = np.mean(matrix_ft16_)
|
||||
simi_max_ft16_ = np.max(matrix_ft16_)
|
||||
stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
|
||||
evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
|
||||
rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]
|
||||
|
||||
if not save:
|
||||
return
|
||||
|
||||
|
||||
##========================================================= save as float32
|
||||
rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
|
||||
with open(rppath, 'wb') as f:
|
||||
pickle.dump(rltdata, f)
|
||||
|
||||
rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
|
||||
with open(rtpath, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##========================================================= save as float16
|
||||
rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
|
||||
with open(rppath_ft16, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16, f)
|
||||
|
||||
rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
|
||||
with open(rtpath_ft16, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##=========================================================== save as uint8
|
||||
rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
|
||||
with open(rppath_uint8, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16_, f)
|
||||
|
||||
rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
|
||||
with open(rtpath_uint8, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16_:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
|
||||
|
||||
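# Hedged sketch of the int8 round-trip used in data_precision_compare() above
# (a synthetic unit-normalised feature matrix, not real data):
import numpy as np
def _int8_roundtrip_sketch():
    feat = np.random.rand(4, 256).astype(np.float32)
    feat /= np.linalg.norm(feat, axis=1, keepdims=True)         # unit-normalise rows, as above
    feat_i8 = (feat.astype(np.float16) * 128).astype(np.int8)   # quantise, mirroring (x*128).astype(np.int8)
    feat_back = feat_i8.astype(np.float16) / 128                # dequantise
    return np.linalg.norm(feat_back - feat) / feat.size         # per-element reconstruction error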
def simi_calc(event, stdfeat):
|
||||
evtfeat = event.feats_compose
|
||||
if isinstance(event.feats_select, list):
|
||||
if len(event.feats_select) and len(event.feats_select[0]):
|
||||
evtfeat = event.feats_select[0]
|
||||
else:
|
||||
return None, None, None
|
||||
else:
|
||||
evtfeat = event.feats_select
|
||||
|
||||
if len(evtfeat)==0 or len(stdfeat)==0:
|
||||
return None, None, None
|
||||
|
||||
|
||||
evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
|
||||
stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]
|
||||
|
||||
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
|
||||
matrix[matrix < 0] = 0
|
||||
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
|
||||
return simi_mean, simi_max, simi_mfeat[0,0]
|
||||
|
||||
|
||||
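# Minimal numeric sketch of the three values simi_calc() above returns
# (two tiny synthetic feature sets, not real event or barcode features):
import numpy as np
from scipy.spatial.distance import cdist
def _simi_calc_sketch():
    evtfeat = np.array([[1.0, 0.0], [0.6, 0.8]])
    stdfeat = np.array([[0.8, 0.6], [0.0, 1.0]])
    matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')              # pairwise cosine similarities
    simi_mean, simi_max = np.mean(matrix), np.max(matrix)
    simi_mfeat = 1 - cdist(evtfeat.mean(axis=0, keepdims=True),
                           stdfeat.mean(axis=0, keepdims=True), 'cosine')
    return simi_mean, simi_max, simi_mfeat[0, 0]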
def build_std_evt_dict():
|
||||
@ -219,18 +55,6 @@ def build_std_evt_dict():
|
||||
|
||||
stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and (p.suffix=='.json' or p.suffix=='.pickle')]
|
||||
|
||||
'''*********** USearch ***********'''
|
||||
# stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
|
||||
# stdBarcode = []
|
||||
# stdlib = {}
|
||||
# with open(stdFeaturePath, 'r', encoding='utf-8') as f:
|
||||
# data = json.load(f)
|
||||
# for dic in data['total']:
|
||||
# barcode = dic['key']
|
||||
# feature = np.array(dic['value'])
|
||||
# stdBarcode.append(barcode)
|
||||
# stdlib[barcode] = feature
|
||||
|
||||
'''======1. 购物事件列表,该列表中的 Barcode 存在于标准的 stdBarcode 内 ==='''
|
||||
evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventDataPath).iterdir()
|
||||
if p.is_file()
|
||||
@ -260,9 +84,6 @@ def build_std_evt_dict():
|
||||
feat = stddata["feats_ft32"]
|
||||
stdDict[barcode] = feat
|
||||
|
||||
|
||||
|
||||
|
||||
'''*********** USearch ***********'''
|
||||
# stdDict = {}
|
||||
# for barcode in barcodes:
|
||||
@ -282,7 +103,7 @@ def build_std_evt_dict():
|
||||
|
||||
return evtList, evtDict, stdDict
|
||||
|
||||
def one2SN_pr(evtList, evtDict, stdDict):
|
||||
def one2SN_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
|
||||
std_barcodes = set([bcd for _, bcd in evtList])
|
||||
|
||||
@ -312,8 +133,14 @@ def one2SN_pr(evtList, evtDict, stdDict):
|
||||
barcodes, similars = [], []
|
||||
for stdbcd in bcd_selected:
|
||||
stdfeat = stdDict[stdbcd]
|
||||
simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
|
||||
# simi_mean = calc_simil(event, stdfeat)
|
||||
|
||||
if simType=="typea":
|
||||
simi_mean, simi_max, simi_mfeat = calsimi_vs_stdfeat(event, stdfeat)
|
||||
elif simType=="typeb":
|
||||
pass
|
||||
else:
|
||||
simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat_new(event, stdfeat)
|
||||
|
||||
|
||||
## 在event.front_feats和event.back_feats同时为空时,此处不需要保护
|
||||
# if simi_mean==None:
|
||||
@ -376,6 +203,10 @@ def one2SN_pr(evtList, evtDict, stdDict):
|
||||
ax.set_xlabel(f"Event Num: {len(tp_events) + len(fn_events)}")
|
||||
ax.legend()
|
||||
plt.show()
|
||||
|
||||
rltpath = os.path.join(similPath, f'pr_1toSN_{simType}.png')
|
||||
plt.savefig(rltpath)
|
||||
|
||||
## ============================= 1:N 展厅 直方图'''
|
||||
fig, axes = plt.subplots(2, 2)
|
||||
axes[0, 0].hist(tp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
|
||||
@ -392,10 +223,13 @@ def one2SN_pr(evtList, evtDict, stdDict):
|
||||
axes[1, 1].set_title(f'FN({len(fn_simi)})')
|
||||
plt.show()
|
||||
|
||||
rltpath = os.path.join(similPath, f'hist_1toSN_{simType}.png')
|
||||
plt.savefig(rltpath)
|
||||
|
||||
|
||||
|
||||
def one2one_simi(evtList, evtDict, stdDict):
|
||||
|
||||
def one2one_simi(evtList, evtDict, stdDict, simType):
|
||||
|
||||
barcodes = set([bcd for _, bcd in evtList])
|
||||
'''======1 构造 3 个事件对: 扫 A 放 A, 扫 A 放 B, 合并 ===================='''
|
||||
@ -422,31 +256,50 @@ def one2one_simi(evtList, evtDict, stdDict):
|
||||
|
||||
stdfeat = stdDict[stdbcd] # float32
|
||||
|
||||
simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
|
||||
if simType=="typea":
|
||||
simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat_new(event, stdfeat)
|
||||
elif simType=="typeb":
|
||||
pass
|
||||
else:
|
||||
simi_mean, simi_1, simi_2 = calsimi_vs_stdfeat(event, stdfeat)
|
||||
|
||||
if simi_mean is None:
|
||||
continue
|
||||
|
||||
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat))
|
||||
rltdata.append((label, stdbcd, evtname, simi_mean, simi_1, simi_2))
|
||||
|
||||
'''================ float32、16、int8 精度比较与存储 ============='''
|
||||
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], save=True)
|
||||
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], similPath, save=True)
|
||||
|
||||
errorFile_one2one = list(set(errorFile_one2one))
|
||||
|
||||
return rltdata, errorFile_one2one
|
||||
|
||||
|
||||
def one2one_pr(evtList, evtDict, stdDict):
|
||||
def one2one_pr(evtList, evtDict, stdDict, simType="simple"):
|
||||
|
||||
rltdata, errorFile_one2one = one2one_simi(evtList, evtDict, stdDict)
|
||||
rltdata, errorFile_one2one = one2one_simi(evtList, evtDict, stdDict, simType)
|
||||
|
||||
Same, Cross = [], []
|
||||
|
||||
for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
|
||||
if label == "same":
|
||||
if simType=="simple" and label == "same":
|
||||
Same.append(simi_max)
|
||||
if label == "diff":
|
||||
if simType=="simple" and label == "diff":
|
||||
Cross.append(simi_max)
|
||||
|
||||
if simType=="typea" and label == "same":
|
||||
Same.append(simi_mean)
|
||||
if simType=="typea" and label == "diff":
|
||||
Cross.append(simi_mean)
|
||||
|
||||
|
||||
# for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
|
||||
# if label == "same":
|
||||
# Same.append(simi_mean)
|
||||
# if label == "diff":
|
||||
# Cross.append(simi_mean)
|
||||
|
||||
Same = np.array(Same)
|
||||
Cross = np.array(Cross)
|
||||
TPFN = len(Same)
|
||||
@ -508,7 +361,7 @@ def one2one_pr(evtList, evtDict, stdDict):
|
||||
ax.legend()
|
||||
plt.show()
|
||||
|
||||
rltpath = os.path.join(similPath, 'pr.png')
|
||||
rltpath = os.path.join(similPath, f'pr_1to1_{simType}.png')
|
||||
plt.savefig(rltpath) # svg, png, pdf
|
||||
|
||||
|
||||
@ -521,7 +374,7 @@ def one2one_pr(evtList, evtDict, stdDict):
|
||||
axes[1].set_xlim([-0.2, 1])
|
||||
axes[1].set_title(f'TN({len(Cross)})')
|
||||
|
||||
rltpath = os.path.join(similPath, 'hist.png')
|
||||
rltpath = os.path.join(similPath, f'hist_1to1_{simType}.png')
|
||||
plt.savefig(rltpath)
|
||||
|
||||
|
||||
@ -529,158 +382,25 @@ def one2one_pr(evtList, evtDict, stdDict):
|
||||
|
||||
|
||||
|
||||
def gen_eventdict(sourcePath, saveimg=True):
|
||||
k, errEvents = 0, []
|
||||
for source_path in sourcePath:
|
||||
evtpath, bname = os.path.split(source_path)
|
||||
|
||||
## 兼容事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
if os.path.isfile(source_path):
|
||||
bname, ext = os.path.splitext(bname)
|
||||
# evt = bname.split("_")
|
||||
|
||||
evt = bname.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||
if not condt: continue
|
||||
|
||||
|
||||
# 如果已完成事件生成,则不执行
|
||||
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
|
||||
if os.path.isfile(pickpath): continue
|
||||
|
||||
try:
|
||||
event = ShoppingEvent(source_path, stype=source_type)
|
||||
# save_data(event, resultPath)
|
||||
|
||||
with open(pickpath, 'wb') as f:
|
||||
pickle.dump(event, f)
|
||||
print(bname)
|
||||
except Exception as e:
|
||||
errEvents.append(source_path)
|
||||
print(e)
|
||||
|
||||
# k += 1
|
||||
# if k==1:
|
||||
# break
|
||||
|
||||
errfile = os.path.join(resultPath, 'error_events.txt')
|
||||
# with open(errfile, 'w', encoding='utf-8') as f:
|
||||
# for line in errEvents:
|
||||
# f.write(line + '\n')
|
||||
|
||||
|
||||
# def init_std_evt_dict():
|
||||
# '''==== 0. 生成事件列表和对应的 Barcodes列表 ==========='''
|
||||
# bcdList, event_spath = [], []
|
||||
# for evtname in os.listdir(eventSourcePath):
|
||||
# bname, ext = os.path.splitext(evtname)
|
||||
|
||||
# ## 处理事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
# fpath = os.path.join(eventSourcePath, evtname)
|
||||
# if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
|
||||
# evt = bname.split('_')
|
||||
# elif os.path.isdir(fpath):
|
||||
# evt = evtname.split('_')
|
||||
# else:
|
||||
# continue
|
||||
|
||||
# if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||
# bcdList.append(evt[-1])
|
||||
# event_spath.append(fpath)
|
||||
|
||||
# '''==== 1. 生成标准特征集, 只需运行一次, 在 genfeats.py 中实现 ==========='''
|
||||
# bcdSet = set(bcdList)
|
||||
# gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
|
||||
# print("stdFeats have generated and saved!")
|
||||
|
||||
# '''==== 2. 生成事件字典, 只需运行一次 ==============='''
|
||||
# gen_eventdict(event_spath)
|
||||
# print("eventList have generated and saved!")
|
||||
|
||||
def get_evtList():
|
||||
|
||||
'''==== 0. 生成事件列表和对应的 Barcodes 集合 ==========='''
|
||||
bcdList, evtpaths = [], []
|
||||
for evtname in os.listdir(eventSourcePath):
|
||||
bname, ext = os.path.splitext(evtname)
|
||||
|
||||
## 处理事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
fpath = os.path.join(eventSourcePath, evtname)
|
||||
if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
|
||||
evt = bname.split('_')
|
||||
elif os.path.isdir(fpath):
|
||||
evt = evtname.split('_')
|
||||
else:
|
||||
continue
|
||||
|
||||
if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||
bcdList.append(evt[-1])
|
||||
evtpaths.append(fpath)
|
||||
|
||||
bcdSet = set(bcdList)
|
||||
|
||||
return evtpaths, bcdSet
|
||||
|
||||
|
||||
|
||||
# def init_stdDict():
|
||||
# evtpaths, bcdSet = get_evtList()
|
||||
# gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
|
||||
# print("stdFeats have generated and saved!")
|
||||
|
||||
|
||||
# def init_evtDict():
|
||||
# '''==== 0. 生成事件列表和对应的 Barcodes列表 ==========='''
|
||||
# bcdList, event_spath = [], []
|
||||
# for evtname in os.listdir(eventSourcePath):
|
||||
# bname, ext = os.path.splitext(evtname)
|
||||
|
||||
# ## 处理事件的两种情况:文件夹 和 Yolo-Resnet-Tracker 的输出
|
||||
# fpath = os.path.join(eventSourcePath, evtname)
|
||||
# if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
|
||||
# evt = bname.split('_')
|
||||
# elif os.path.isdir(fpath):
|
||||
# evt = evtname.split('_')
|
||||
# else:
|
||||
# continue
|
||||
|
||||
# if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||
# bcdList.append(evt[-1])
|
||||
# event_spath.append(fpath)
|
||||
|
||||
# '''==== 2. 生成事件字典, 只需运行一次 ==============='''
|
||||
# gen_eventdict(event_spath)
|
||||
# print("eventList have generated and saved!")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def test_one2one_one2SN():
|
||||
def test_one2one_one2SN(simType):
|
||||
'''1:1性能评估'''
|
||||
|
||||
# evtpaths, bcdSet = get_evtList()
|
||||
# evtpaths, bcdSet = get_evtList(eventSourcePath)
|
||||
|
||||
'''=== 1. 只需运行一次,生成事件对应的标准特征库字典,如已生成,无需运行 ===='''
|
||||
# gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, eventSourcePath)
|
||||
|
||||
'''==== 2. 生成事件字典, 只需运行一次 ===================='''
|
||||
# init_eventDict(eventSourcePath, eventDataPath, source_type)
|
||||
|
||||
# date_ = ['2025-3-4_1', '2025-3-5_1', '2025-3-5_2']
|
||||
# for dt in date_:
|
||||
# evtpaths = os.path.join(eventSourcePath, dt)
|
||||
# init_eventDict(evtpaths, eventDataPath, source_type)
|
||||
|
||||
init_eventDict(eventSourcePath, eventDataPath, source_type)
|
||||
|
||||
|
||||
|
||||
'''==== 2. 基于事件barcode集和标准库barcode交集构造事件集合 ========='''
|
||||
'''==== 3. 基于事件barcode集和标准库barcode交集构造事件集合 ========='''
|
||||
evtList, evtDict, stdDict = build_std_evt_dict()
|
||||
|
||||
one2one_pr(evtList, evtDict, stdDict)
|
||||
one2one_pr(evtList, evtDict, stdDict, simType)
|
||||
|
||||
one2SN_pr(evtList, evtDict, stdDict)
|
||||
one2SN_pr(evtList, evtDict, stdDict, simType)
|
||||
|
||||
if __name__ == '__main__':
|
||||
'''
|
||||
@ -694,20 +414,9 @@ if __name__ == '__main__':
|
||||
(7) similPath: 1:1比对结果存储地址(事件级),在resultPath下
|
||||
'''
|
||||
|
||||
# stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
|
||||
# stdBarcodePath = r"D:\exhibition\dataset\bcdpath"
|
||||
# stdFeaturePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\features_json\v11_barcode_11592"
|
||||
|
||||
# eventSourcePath = r'D:\exhibition\images\20241202'
|
||||
# eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"
|
||||
|
||||
# stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v2.0_abroad\比对数据\all_base_二筛"
|
||||
# stdBarcodePath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\比对测试数据20250121_testing\bcdpath"
|
||||
# stdFeaturePath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\比对测试数据20250121_testing\stdfeats"
|
||||
|
||||
stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\totalBarcode"
|
||||
stdBarcodePath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\testing\bcdpath"
|
||||
stdFeaturePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\features_json\v11_barcode_0304"
|
||||
stdSamplePath = "/home/wqg/dataset/total_barcode/totalBarcode"
|
||||
stdBarcodePath = "/home/wqg/dataset/total_barcode/bcdpath"
|
||||
stdFeaturePath = "/home/wqg/dataset/test_dataset/total_barcode/features_json/v11_barcode_0304/"
|
||||
|
||||
if not os.path.exists(stdBarcodePath):
|
||||
os.makedirs(stdBarcodePath)
|
||||
@ -719,18 +428,24 @@ if __name__ == '__main__':
|
||||
"data": 基于事件切分的原 data 文件版本
|
||||
"realtime": 全实时生成的 data 文件
|
||||
'''
|
||||
source_type = 'realtime' # 'source', 'data', 'realtime'
|
||||
eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\V12\基准数据集\2025-3-4_1"
|
||||
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\testing"
|
||||
source_type = 'source' # 'source', 'data', 'realtime'
|
||||
simType = "typea" # "simple", "typea", "typeb"
|
||||
|
||||
eventDataPath = os.path.join(resultPath, "evtobjs_0304_1")
|
||||
similPath = os.path.join(resultPath, "simidata_0304_1")
|
||||
evttype = "single_event_V10"
|
||||
# evttype = "single_event_V5"
|
||||
# evttype = "performence_V10"
|
||||
# evttype = "performence_V5"
|
||||
eventSourcePath = "/home/wqg/dataset/pipeline/yrt/{}/shopping_pkl".format(evttype)
|
||||
|
||||
resultPath = "/home/wqg/dataset/pipeline/contrast/{}".format(evttype)
|
||||
eventDataPath = os.path.join(resultPath, "evtobjs")
|
||||
similPath = os.path.join(resultPath, "simidata")
|
||||
if not os.path.exists(eventDataPath):
|
||||
os.makedirs(eventDataPath)
|
||||
if not os.path.exists(similPath):
|
||||
os.makedirs(similPath)
|
||||
|
||||
test_one2one_one2SN()
|
||||
test_one2one_one2SN(simType)
|
||||
|
||||
|
||||
|
||||
|
@ -16,7 +16,11 @@ from pathlib import Path
|
||||
import matplotlib.pyplot as plt
|
||||
import sys
|
||||
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
|
||||
from tracking.utils.read_data import read_similar
|
||||
|
||||
def read_one2one_data(filepath):
|
||||
@ -531,21 +535,23 @@ def contrast_pr(evtPaths):
|
||||
|
||||
|
||||
# bcdSet = set(bcdList)
|
||||
one2nErrFile = os.path.join(evtPaths, "one_2_Small_n_Error.txt")
|
||||
with open(one2nErrFile, "w") as file:
|
||||
for item in fnevents:
|
||||
file.write(item + "\n")
|
||||
|
||||
one2NErrFile = os.path.join(evtPaths, "one_2_Big_N_Error.txt")
|
||||
with open(one2NErrFile, "w") as file:
|
||||
for item in fn_events:
|
||||
file.write(item + "\n")
|
||||
|
||||
# one2nErrFile = os.path.join(evtPaths, "one_2_Small_n_Error.txt")
|
||||
# with open(one2nErrFile, "w") as file:
|
||||
# for item in fnevents:
|
||||
# file.write(item + "\n")
|
||||
|
||||
# one2NErrFile = os.path.join(evtPaths, "one_2_Big_N_Error.txt")
|
||||
# with open(one2NErrFile, "w") as file:
|
||||
# for item in fn_events:
|
||||
# file.write(item + "\n")
|
||||
|
||||
print('Done!')
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\全实时测试\V12\2025-3-3"
|
||||
evtpaths = r"/home/wqg/dataset/test_base_dataset/single_event/source"
|
||||
contrast_pr(evtpaths)
|
||||
|
||||
|
||||
|
172  contrast/trail2trail.py  Normal file
@@ -0,0 +1,172 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Trajectory-feature comparison and performance analysis for the take-out-and-put-back scenario.

Created on Tue Apr 1 17:17:47 2025
@author: wqg
"""
import os
import pickle
import random
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from utils.calsimi import calsiml, calsimi_vs_evts


def read_eventdict(evtpaths):
    evtDict = {}
    for filename in os.listdir(evtpaths):
        evtname, ext = os.path.splitext(filename)
        if ext != ".pickle": continue

        evtpath = os.path.join(evtpaths, filename)
        with open(evtpath, 'rb') as f:
            evtdata = pickle.load(f)
        evtDict[evtname] = evtdata

    return evtDict


def compute_show_pr(Same, Cross):
    TPFN = len(Same)
    TNFP = len(Cross)

    Recall_Pos, Recall_Neg = [], []
    Precision_Pos, Precision_Neg = [], []
    Correct = []
    Thresh = np.linspace(-0.2, 1, 100)
    for th in Thresh:
        TP = np.sum(Same >= th)
        FN = np.sum(Same < th)
        # FN = TPFN - TP

        TN = np.sum(Cross < th)
        FP = np.sum(Cross >= th)
        # FP = TNFP - TN

        Precision_Pos.append(TP/(TP+FP+1e-6))
        Precision_Neg.append(TN/(TN+FN+1e-6))
        Recall_Pos.append(TP/(TP+FN+1e-6))
        Recall_Neg.append(TN/(TN+FP+1e-6))

        # Recall_Pos.append(TP/TPFN)
        # Recall_Neg.append(TN/TNFP)

        Correct.append((TN+TP)/(TPFN+TNFP))

    fig, ax = plt.subplots()

    ax.plot(Thresh, Precision_Pos, 'r', label='Precision_Pos: TP/(TP+FP)')
    ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
    ax.plot(Thresh, Recall_Neg, 'g', label='Recall_Neg: TN/TNFP')
    ax.plot(Thresh, Correct, 'c', label='Correct: (TN+TP)/(TPFN+TNFP)')
    ax.plot(Thresh, Precision_Neg, 'm', label='Precision_Neg: TN/(TN+FN)')

    ax.set_xlim([0, 1])
    ax.set_ylim([0, 1])

    ax.set_xticks(np.arange(0, 1, 0.1))
    ax.set_yticks(np.arange(0, 1, 0.1))
    ax.grid(True, linestyle='--')

    ax.set_title('PrecisePos & PreciseNeg')
    ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
    ax.legend()
    plt.show()

    # rltpath = os.path.join(similPath, f'pr_1to1_{simType}.png')
    # plt.savefig(rltpath)  # svg, png, pdf

    fig, axes = plt.subplots(2,1)
    axes[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[0].set_xlim([-0.2, 1])
    axes[0].set_title(f'TP({len(Same)})')

    axes[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
    axes[1].set_xlim([-0.2, 1])
    axes[1].set_title(f'TN({len(Cross)})')

    # rltpath = os.path.join(similPath, f'hist_1to1_{simType}.png')
    # plt.savefig(rltpath)

    plt.show()
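# Hedged usage sketch for compute_show_pr() with synthetic similarity scores
# (real inputs come from calsimi_vs_evts over event pairs, as in trail_to_trail below):
def _compute_show_pr_demo():
    rng = np.random.default_rng(0)
    Same = rng.normal(0.7, 0.1, 200).clip(-0.2, 1.0)    # same-barcode pairs
    Cross = rng.normal(0.3, 0.1, 200).clip(-0.2, 1.0)   # cross-barcode pairs
    compute_show_pr(Same, Cross)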
def trail_to_trail(evtpaths, rltpaths):
|
||||
# select the method type of how to calculate the feat similarity of trail
|
||||
simType = 2
|
||||
|
||||
##1. read all the ShoppingEvent object in the dir 'evtpaths'
|
||||
evtDicts = read_eventdict(evtpaths)
|
||||
|
||||
##2. Combine event object with the same barcode
|
||||
barcodes, evtpairDict = [], {}
|
||||
for k in evtDicts.keys():
|
||||
evt = k.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||
if not condt: continue
|
||||
|
||||
barcode = evt[-1]
|
||||
if barcode not in evtpairDict.keys():
|
||||
evtpairDict[barcode] = []
|
||||
barcodes.append(barcode)
|
||||
|
||||
evtpairDict[barcode].append(evtDicts[k])
|
||||
barcodes = set(barcodes)
|
||||
|
||||
AA_list, AB_list = [], []
|
||||
for barcode in evtpairDict.keys():
|
||||
events = evtpairDict[barcode]
|
||||
if len(events)>1:
|
||||
evta, evtb = random.sample(events, 2)
|
||||
AA_list.append((evta, evtb, "same"))
|
||||
|
||||
evtc = random.sample(events, 1)[0]
|
||||
|
||||
dset = list(barcodes.symmetric_difference(set([barcode])))
|
||||
bcd = random.sample(dset, 1)[0]
|
||||
evtd = random.sample(evtpairDict[bcd], 1)[0]
|
||||
AB_list.append((evtc, evtd, "diff"))
|
||||
|
||||
mergePairs = AA_list + AB_list
|
||||
|
||||
##3. calculate the similar of two event: evta, evtb
|
||||
new_pirs = []
|
||||
for evta, evtb, label in mergePairs:
|
||||
similar = calsimi_vs_evts(evta, evtb, simType)
|
||||
if similar is None:
|
||||
continue
|
||||
new_pirs.append((label, round(similar, 3), evta.evtname[:15], evtb.evtname[:15]))
|
||||
|
||||
##4. compute PR and showing
|
||||
Same = np.array([s for label, s, _, _ in new_pirs if label=="same"])
|
||||
Cross = np.array([s for label, s, _, _ in new_pirs if label=="diff"])
|
||||
compute_show_pr(Same, Cross)
|
||||
|
||||
|
||||
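A toy illustration of the pair sampling used in trail_to_trail above (barcodes and event names below are made up): two events sharing a barcode give a "same" pair, and an event paired with one drawn from another barcode gives a "diff" pair.

import random

evtpairDict = {
    "6901234567890": ["evtA_6901234567890", "evtB_6901234567890"],
    "6909876543210": ["evtC_6909876543210"],
}
barcodes = set(evtpairDict)

AA_list, AB_list = [], []
for bcd, events in evtpairDict.items():
    if len(events) > 1:                               # positive ("same") pair
        evta, evtb = random.sample(events, 2)
        AA_list.append((evta, evtb, "same"))
    evtc = random.choice(events)                      # negative ("diff") pair
    other = random.choice(list(barcodes - {bcd}))
    evtd = random.choice(evtpairDict[other])
    AB_list.append((evtc, evtd, "diff"))

print(AA_list + AB_list)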
def main():
|
||||
evttypes = ["single_event_V10", "single_event_V5", "performence_V10", "performence_V5"]
|
||||
# evttypes = ["single_event_V10"]
|
||||
|
||||
for evttype in evttypes:
|
||||
evtpaths = "/home/wqg/dataset/pipeline/contrast/{}/evtobjs/".format(evttype)
|
||||
rltpaths = "/home/wqg/dataset/pipeline/yrt/{}/yolos_tracking".format(evttype)
|
||||
|
||||
trail_to_trail(evtpaths, rltpaths)
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
|
||||
|
BIN
contrast/utils/__pycache__/__init__.cpython-312.pyc
Normal file
Binary file not shown.
BIN
contrast/utils/__pycache__/calsimi.cpython-312.pyc
Normal file
Binary file not shown.
BIN
contrast/utils/__pycache__/databits.cpython-312.pyc
Normal file
Binary file not shown.
BIN
contrast/utils/__pycache__/event.cpython-312.pyc
Normal file
Binary file not shown.
BIN
contrast/utils/__pycache__/tools.cpython-312.pyc
Normal file
Binary file not shown.
216
contrast/utils/calsimi.py
Normal file
@ -0,0 +1,216 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Mon Mar 31 16:25:43 2025
|
||||
|
||||
@author: wqg
|
||||
"""
|
||||
import numpy as np
|
||||
from scipy.spatial.distance import cdist
|
||||
|
||||
|
||||
def get_topk_percent(data, k):
|
||||
"""
|
||||
Return the largest k% of the elements in data.
|
||||
"""
|
||||
# convert the input to a NumPy array
|
||||
if isinstance(data, list):
|
||||
data = np.array(data)
|
||||
|
||||
percentile = np.percentile(data, 100-k)
|
||||
top_k_percent = data[data >= percentile]
|
||||
|
||||
return top_k_percent
|
||||
def cluster(data, thresh=0.15):
|
||||
# data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
|
||||
# data = np.array([0.1, 0.13, 0.2, 0.3])
|
||||
# data = np.array([0.1])
|
||||
|
||||
if isinstance(data, list):
|
||||
data = np.array(data)
|
||||
|
||||
data1 = np.sort(data)
|
||||
cluter, Cluters, = [data1[0]], []
|
||||
for i in range(1, len(data1)):
|
||||
if data1[i] - data1[i-1]< thresh:
|
||||
cluter.append(data1[i])
|
||||
else:
|
||||
Cluters.append(cluter)
|
||||
cluter = [data1[i]]
|
||||
Cluters.append(cluter)
|
||||
|
||||
clt_center = []
|
||||
for clt in Cluters:
|
||||
## Should a minimum number of trajectory samples per cluster be enforced here? That factor is better handled in the trajectory analysis.
|
||||
# if len(clt)>=3:
|
||||
# clt_center.append(np.mean(clt))
|
||||
clt_center.append(np.mean(clt))
|
||||
|
||||
# print(clt_center)
|
||||
|
||||
return clt_center
|
||||
|
||||
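Illustration only: how the two helpers above behave on small hand-picked lists (the values are invented for the example).

print(get_topk_percent([0.1, 0.4, 0.6, 0.9, 0.95], 40))   # [0.9 0.95]: values at or above the 60th percentile

data = [0.10, 0.13, 0.20, 0.52, 0.58, 0.70, 0.80, 0.85]
print(cluster(data, thresh=0.15))   # ~[0.143, 0.69]: the 0.32 gap splits the sorted values into two clusters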
def calsiml(feat1, feat2, topkp=75, cluth=0.15):
|
||||
'''Selection strategy for the similarity between trajectory samples and the standard feature set'''
|
||||
matrix = 1 - cdist(feat1, feat2, 'cosine')
|
||||
simi_max = []
|
||||
for i in range(len(matrix)):
|
||||
sim = np.mean(get_topk_percent(matrix[i, :], topkp))
|
||||
simi_max.append(sim)
|
||||
cltc_max = cluster(simi_max, cluth)
|
||||
Simi = max(cltc_max)
|
||||
|
||||
## an empty cltc_max points to an oversight in the code and should be investigated and fixed
|
||||
# if len(cltc_max):
|
||||
# Simi = max(cltc_max)
|
||||
# else:
|
||||
# Simi = 0 #不应该走到该处
|
||||
|
||||
return Simi
|
||||
|
||||
|
||||
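A hedged usage sketch of calsiml with random 256-d features (the dimension assumed elsewhere in this file): it scores one trajectory feature set against a reference set.

import numpy as np

rng = np.random.default_rng(0)
feat1 = rng.normal(size=(20, 256))   # e.g. trajectory features
feat2 = rng.normal(size=(50, 256))   # e.g. standard / gallery features
feat1 /= np.linalg.norm(feat1, axis=1, keepdims=True)
feat2 /= np.linalg.norm(feat2, axis=1, keepdims=True)

print(calsiml(feat1, feat2))   # per-row mean of the top-75% similarities, gap-clustered; the largest centre is returned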
def calsimi_vs_stdfeat_new(event, stdfeat):
|
||||
'''Comparison strategy between an event and the standard feature library.
|
||||
Could this comparison strategy be extended to event-vs-event matching?
|
||||
'''
|
||||
front_boxes = np.empty((0, 9), dtype=np.float64)  ## compatible with class doTracks
|
||||
front_feats = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
|
||||
for i in range(len(event.front_boxes)):
|
||||
front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
|
||||
front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
|
||||
|
||||
back_boxes = np.empty((0, 9), dtype=np.float64)  ## compatible with class doTracks
|
||||
back_feats = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
|
||||
for i in range(len(event.back_boxes)):
|
||||
back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
|
||||
back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
|
||||
|
||||
front_simi, back_simi = None, None
|
||||
if len(front_feats):
|
||||
front_simi = calsiml(front_feats, stdfeat)
|
||||
if len(back_feats):
|
||||
back_simi = calsiml(back_feats, stdfeat)
|
||||
|
||||
'''Fusion strategy for front- and back-camera similarities'''
|
||||
if len(front_feats) and len(back_feats):
|
||||
diff_simi = abs(front_simi - back_simi)
|
||||
if diff_simi>0.15:
|
||||
Similar = max([front_simi, back_simi])
|
||||
else:
|
||||
Similar = (front_simi+back_simi)/2
|
||||
elif len(front_feats) and len(back_feats)==0:
|
||||
Similar = front_simi
|
||||
elif len(front_feats)==0 and len(back_feats):
|
||||
Similar = back_simi
|
||||
else:
|
||||
Similar = None  # both event.front_feats and event.back_feats are empty
|
||||
|
||||
return Similar, front_simi, back_simi
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
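For clarity, the front/back fusion rule used above (and again in calsimi_vs_evts below) restated as a standalone helper; fuse_front_back is a hypothetical name, not part of this commit.

def fuse_front_back(front_simi, back_simi, gap=0.15):
    """Take the larger score when the cameras disagree by more than `gap`, otherwise average."""
    if front_simi is None and back_simi is None:
        return None
    if front_simi is None:
        return back_simi
    if back_simi is None:
        return front_simi
    if abs(front_simi - back_simi) > gap:
        return max(front_simi, back_simi)
    return (front_simi + back_simi) / 2

print(fuse_front_back(0.82, 0.55))   # 0.82: large disagreement, trust the higher camera
print(fuse_front_back(0.62, 0.58))   # 0.60: cameras agree, average them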
def calsimi_vs_stdfeat(event, stdfeat):
|
||||
evtfeat = event.feats_compose
|
||||
if isinstance(event.feats_select, list):
|
||||
if len(event.feats_select) and len(event.feats_select[0]):
|
||||
evtfeat = event.feats_select[0]
|
||||
else:
|
||||
return None, None, None
|
||||
else:
|
||||
evtfeat = event.feats_select
|
||||
|
||||
if len(evtfeat)==0 or len(stdfeat)==0:
|
||||
return None, None, None
|
||||
|
||||
|
||||
evtfeat /= np.linalg.norm(evtfeat, axis=1)[:, None]
|
||||
stdfeat /= np.linalg.norm(stdfeat, axis=1)[:, None]
|
||||
|
||||
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
|
||||
matrix[matrix < 0] = 0
|
||||
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
|
||||
return simi_mean, simi_max, simi_mfeat[0,0]
|
||||
|
||||
|
||||
def calsimi_vs_evts(evta, evtb, simType=1):
|
||||
if simType==1:
|
||||
if len(evta.feats_compose) and len(evtb.feats_compose):
|
||||
feata = evta.feats_compose
|
||||
featb = evtb.feats_compose
|
||||
matrix = 1 - cdist(feata, featb, 'cosine')
|
||||
similar = np.mean(matrix)
|
||||
else:
|
||||
similar = None
|
||||
return similar
|
||||
|
||||
if simType==2:
|
||||
if len(evta.feats_compose) and len(evtb.feats_compose):
|
||||
feata = evta.feats_compose
|
||||
featb = evtb.feats_compose
|
||||
matrix = 1 - cdist(feata, featb, 'cosine')
|
||||
similar = np.max(matrix)
|
||||
else:
|
||||
similar = None
|
||||
return similar
|
||||
|
||||
if simType==3:
|
||||
if len(evta.feats_compose) and len(evtb.feats_compose):
|
||||
feata = evta.feats_compose
|
||||
featb = evtb.feats_compose
|
||||
similar = calsiml(feata, featb)
|
||||
else:
|
||||
similar = None
|
||||
return similar
|
||||
|
||||
|
||||
##1. the front feats of evta, evtb
|
||||
fr_feata = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
|
||||
for i in range(len(evta.front_feats)):
|
||||
fr_feata = np.concatenate((fr_feata, evta.front_feats[i]), axis=0)
|
||||
|
||||
fr_featb = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
|
||||
for i in range(len(evtb.front_feats)):
|
||||
fr_featb = np.concatenate((fr_featb, evtb.front_feats[i]), axis=0)
|
||||
|
||||
##2. the back feats of evta, evtb
|
||||
bk_feata = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
|
||||
for i in range(len(evta.back_feats)):
|
||||
bk_feata = np.concatenate((bk_feata, evta.back_feats[i]), axis=0)
|
||||
|
||||
bk_featb = np.empty((0, 256), dtype=np.float64)  ## compatible with class doTracks
|
||||
for i in range(len(evtb.back_feats)):
|
||||
bk_featb = np.concatenate((bk_featb, evtb.back_feats[i]), axis=0)
|
||||
|
||||
|
||||
front_simi, back_simi = None, None
|
||||
if len(fr_feata) and len(fr_featb):
|
||||
front_simi = calsiml(fr_feata, fr_featb)
|
||||
|
||||
if len(bk_feata) and len(bk_featb):
|
||||
back_simi = calsiml(bk_feata, bk_featb)
|
||||
|
||||
'''Fusion strategy for front- and back-camera similarities'''
|
||||
if front_simi is not None and back_simi is not None:
|
||||
diff_simi = abs(front_simi - back_simi)
|
||||
if diff_simi>0.15:
|
||||
similar = max([front_simi, back_simi])
|
||||
else:
|
||||
similar = (front_simi+back_simi)/2
|
||||
elif front_simi is not None and back_simi is None:
|
||||
similar = front_simi
|
||||
elif front_simi is None and back_simi is not None:
|
||||
similar = back_simi
|
||||
else:
|
||||
similar = None  # both event.front_feats and event.back_feats are empty
|
||||
|
||||
return similar
|
||||
|
||||
|
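A hedged usage sketch of calsimi_vs_evts: for simType 1-3 only feats_compose is read, so a minimal stand-in object is enough (the SimpleNamespace stub below is illustrative, not the project's ShoppingEvent class).

import numpy as np
from types import SimpleNamespace

rng = np.random.default_rng(0)

def make_stub(n):
    feats = rng.normal(size=(n, 256))
    feats /= np.linalg.norm(feats, axis=1, keepdims=True)
    return SimpleNamespace(feats_compose=feats, front_feats=[], back_feats=[])

evta, evtb = make_stub(30), make_stub(40)
print(calsimi_vs_evts(evta, evtb, simType=1))   # mean pairwise cosine similarity
print(calsimi_vs_evts(evta, evtb, simType=2))   # max pairwise cosine similarity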
127
contrast/utils/databits.py
Normal file
@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Apr 1 16:43:04 2025
|
||||
|
||||
@author: wqg
|
||||
"""
|
||||
import os
|
||||
import pickle
|
||||
import numpy as np
|
||||
from scipy.spatial.distance import cdist
|
||||
|
||||
|
||||
def int8_to_ft16(arr_uint8, amin, amax):
|
||||
arr_ft16 = (arr_uint8 / 255 * (amax-amin) + amin).astype(np.float16)
|
||||
|
||||
return arr_ft16
|
||||
|
||||
def ft16_to_uint8(arr_ft16):
|
||||
# pickpath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32vsft16\6902265587712_ft16.pickle"
|
||||
|
||||
# with open(pickpath, 'rb') as f:
|
||||
# edict = pickle.load(f)
|
||||
|
||||
# arr_ft16 = edict['feats']
|
||||
|
||||
amin = np.min(arr_ft16)
|
||||
amax = np.max(arr_ft16)
|
||||
arr_ft255 = (arr_ft16 - amin) * 255 / (amax-amin)
|
||||
arr_uint8 = arr_ft255.astype(np.uint8)
|
||||
|
||||
arr_ft16_ = int8_to_ft16(arr_uint8, amin, amax)
|
||||
|
||||
arrDistNorm = np.linalg.norm(arr_ft16_ - arr_ft16) / arr_ft16_.size
|
||||
|
||||
return arr_uint8, arr_ft16_
|
||||
|
||||
|
||||
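Illustration only: a roundtrip through the int8 quantisation used in data_precision_compare below (scale by 128, cast to int8, divide back), showing the reconstruction error on unit-norm features.

import numpy as np

rng = np.random.default_rng(0)
feat = rng.normal(size=(10, 256)).astype(np.float32)
feat /= np.linalg.norm(feat, axis=1, keepdims=True)

feat_i8   = (feat * 128).astype(np.int8)        # quantise
feat_back = feat_i8.astype(np.float16) / 128    # dequantise

print(np.abs(feat_back - feat).max())           # worst-case error below 1/128 ~= 0.008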
def data_precision_compare(stdfeat, evtfeat, evtMessage, similPath='', save=True):
|
||||
evt, stdbcd, label = evtMessage
|
||||
rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
|
||||
|
||||
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
||||
simi_mean = np.mean(matrix)
|
||||
simi_max = np.max(matrix)
|
||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||
rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]
|
||||
|
||||
|
||||
##================================================================= float16
|
||||
stdfeat_ft16 = stdfeat.astype(np.float16)
|
||||
evtfeat_ft16 = evtfeat.astype(np.float16)
|
||||
stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
|
||||
evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
|
||||
|
||||
|
||||
matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
|
||||
simi_mean_ft16 = np.mean(matrix_ft16)
|
||||
simi_max_ft16 = np.max(matrix_ft16)
|
||||
stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
|
||||
evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
|
||||
rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]
|
||||
|
||||
'''****************** uint8 is ok!!!!!! ******************'''
|
||||
##=================================================================== uint8
|
||||
# stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
|
||||
# evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
|
||||
|
||||
stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
|
||||
evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
|
||||
stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
|
||||
evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
|
||||
|
||||
absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
|
||||
|
||||
matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
|
||||
simi_mean_ft16_ = np.mean(matrix_ft16_)
|
||||
simi_max_ft16_ = np.max(matrix_ft16_)
|
||||
stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
|
||||
evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
|
||||
simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
|
||||
rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]
|
||||
|
||||
if not save:
|
||||
return
|
||||
|
||||
|
||||
##========================================================= save as float32
|
||||
rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
|
||||
with open(rppath, 'wb') as f:
|
||||
pickle.dump(rltdata, f)
|
||||
|
||||
rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
|
||||
with open(rtpath, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##========================================================= save as float16
|
||||
rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
|
||||
with open(rppath_ft16, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16, f)
|
||||
|
||||
rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
|
||||
with open(rtpath_ft16, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
##=========================================================== save as uint8
|
||||
rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
|
||||
with open(rppath_uint8, 'wb') as f:
|
||||
pickle.dump(rltdata_ft16_, f)
|
||||
|
||||
rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
|
||||
with open(rtpath_uint8, 'w', encoding='utf-8') as f:
|
||||
for result in rltdata_ft16_:
|
||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||
line = ', '.join(part)
|
||||
f.write(line + '\n')
|
@ -5,19 +5,25 @@ Created on Tue Nov 26 17:35:05 2024
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import cv2
|
||||
import pickle
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
import sys
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[2] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
|
||||
from tracking.utils.plotting import Annotator, colors
|
||||
from tracking.utils.drawtracks import drawTrack
|
||||
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
|
||||
from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
|
||||
|
||||
|
||||
|
||||
|
||||
# import platform
|
||||
# import pathlib
|
||||
# plt = platform.system()
|
||||
|
@ -4,8 +4,81 @@ Created on Thu Oct 31 15:17:01 2024
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import numpy as np
|
||||
import pickle
|
||||
from pathlib import Path
|
||||
import matplotlib.pyplot as plt
|
||||
from .event import ShoppingEvent
|
||||
|
||||
def init_eventDict(sourcePath, eventDataPath, stype="data"):
|
||||
'''
|
||||
stype: str,
|
||||
'source':   pickle files generated from videos or images
'data':     on-site runtime data read from the 'data' files
"realtime": fully real-time data, read from the 'data' files

sourcePath: event folder; two kinds of events are supported:
    (1) pickle files produced by the pipeline
    (2) event folders captured directly
|
||||
'''
|
||||
k, errEvents = 0, []
|
||||
for evtname in os.listdir(sourcePath):
|
||||
bname, ext = os.path.splitext(evtname)
|
||||
source_path = os.path.join(sourcePath, evtname)
|
||||
|
||||
if stype=="source" and ext not in ['.pkl', '.pickle']: continue
|
||||
if stype=="data" and os.path.isfile(source_path): continue
|
||||
if stype=="realtime" and os.path.isfile(source_path): continue
|
||||
|
||||
evt = bname.split('_')
|
||||
condt = len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10
|
||||
if not condt: continue
|
||||
|
||||
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
|
||||
if os.path.isfile(pickpath): continue
|
||||
|
||||
# event = ShoppingEvent(source_path, stype)
|
||||
try:
|
||||
event = ShoppingEvent(source_path, stype)
|
||||
with open(pickpath, 'wb') as f:
|
||||
pickle.dump(event, f)
|
||||
print(evtname)
|
||||
except Exception as e:
|
||||
errEvents.append(source_path)
|
||||
print(f"Error: {evtname}, {e}")
|
||||
# k += 1
|
||||
# if k==1:
|
||||
# break
|
||||
|
||||
errfile = Path(eventDataPath).parent / 'error_events.txt'
|
||||
with open(str(errfile), 'a', encoding='utf-8') as f:
|
||||
for line in errEvents:
|
||||
f.write(line + '\n')
|
||||
|
||||
|
||||
def get_evtList(evtpath):
|
||||
'''==== 0. Build the event list and the corresponding barcode set ==========='''
|
||||
bcdList, evtpaths = [], []
|
||||
for evtname in os.listdir(evtpath):
|
||||
bname, ext = os.path.splitext(evtname)
|
||||
|
||||
## two event forms are handled: a raw event folder, or the Yolo-Resnet-Tracker output (pickle file)
|
||||
fpath = os.path.join(evtpath, evtname)
|
||||
if os.path.isfile(fpath) and (ext==".pkl" or ext==".pickle"):
|
||||
evt = bname.split('_')
|
||||
elif os.path.isdir(fpath):
|
||||
evt = evtname.split('_')
|
||||
else:
|
||||
continue
|
||||
|
||||
if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||
bcdList.append(evt[-1])
|
||||
evtpaths.append(fpath)
|
||||
|
||||
bcdSet = set(bcdList)
|
||||
|
||||
return evtpaths, bcdSet
|
||||
|
||||
|
||||
|
||||
|
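The event-name convention assumed by init_eventDict and get_evtList above, shown in isolation (has_barcode_suffix is a hypothetical helper, not part of this commit): a valid name ends with an underscore-separated barcode of at least 10 digits.

def has_barcode_suffix(name: str) -> bool:
    parts = name.split('_')
    return len(parts) >= 2 and parts[-1].isdigit() and len(parts[-1]) >= 10

print(has_barcode_suffix("20241212-171505-f0afe929_6907992518930"))   # True
print(has_barcode_suffix("20250310-175352-741"))                      # False: no barcode suffix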
@ -5,27 +5,37 @@ Created on Fri Mar 28 11:35:28 2025
|
||||
@author: ym
|
||||
"""
|
||||
|
||||
from pipeline_01 import execute_pipeline
|
||||
from pipeline import execute_pipeline
|
||||
|
||||
|
||||
execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
|
||||
def execute(datapath, savepath_v5, savepath_v10):
|
||||
execute_pipeline(evtdir = datapath,
|
||||
DataType = "raw", # raw, pkl
|
||||
kk=1,
|
||||
kk=None,
|
||||
source_type = "video", # video, image,
|
||||
save_path = r"D:\work\result_pipeline_V5",
|
||||
save_path = savepath_v5,
|
||||
yolo_ver = "V5", # V10, V5
|
||||
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
|
||||
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
|
||||
saveimages = False
|
||||
)
|
||||
|
||||
execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
|
||||
execute_pipeline(evtdir = datapath,
|
||||
DataType = "raw", # raw, pkl
|
||||
kk=1,
|
||||
kk=None,
|
||||
source_type = "video", # video, image,
|
||||
save_path = r"D:\work\result_pipeline_V10",
|
||||
save_path = savepath_v10,
|
||||
yolo_ver = "V10", # V10, V5
|
||||
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
|
||||
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
|
||||
saveimages = False
|
||||
)
|
||||
|
||||
datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
|
||||
savepath_v5 = r'/home/wqg/dataset/pipeline/contrast/single_event_V5'
|
||||
savepath_v10 = r'/home/wqg/dataset/pipeline/contrast/single_event_V10'
|
||||
execute(datapath, savepath_v5, savepath_v10)
|
||||
|
||||
datapath = r'/home/wqg/dataset/test_performence_dataset/'
|
||||
savepath_v5 = r'/home/wqg/dataset/pipeline/contrast/performence_V5'
|
||||
savepath_v10 = r'/home/wqg/dataset/pipeline/contrast/performence_V10'
|
||||
execute(datapath, savepath_v5, savepath_v10)
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
307
pipeline.py
@ -60,48 +60,158 @@ def save_subimgs_1(imgdict, boxes, spath, ctype, simidict = None):
|
||||
|
||||
cv2.imwrite(imgpath, img)
|
||||
|
||||
def show_result(event_tracks, yrtDict, savepath_pipe):
|
||||
'''保存 Tracking 输出的运动轨迹子图,并记录相似度'''
|
||||
|
||||
def pipeline(
|
||||
eventpath,
|
||||
savepath,
|
||||
savepath_pipe_subimgs = savepath_pipe / Path("subimgs")
|
||||
if not savepath_pipe_subimgs.exists():
|
||||
savepath_pipe_subimgs.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
|
||||
|
||||
for CamerType, vts in event_tracks:
|
||||
if len(vts.tracks)==0: continue
|
||||
if CamerType == 'front':
|
||||
# yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]
|
||||
|
||||
yolos = yrtDict["frontyrt"]
|
||||
ctype = 1
|
||||
if CamerType == 'back':
|
||||
# yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]
|
||||
|
||||
yolos = yrtDict["backyrt"]
|
||||
ctype = 0
|
||||
|
||||
imgdict, featdict, simidict = {}, {}, {}
|
||||
for y in yolos:
|
||||
imgdict.update(y["imgs"])
|
||||
featdict.update(y["feats"])
|
||||
simidict.update(y["featsimi"])
|
||||
|
||||
for track in vts.Residual:
|
||||
if isinstance(track, np.ndarray):
|
||||
save_subimgs(imgdict, track, savepath_pipe_subimgs, ctype, featdict)
|
||||
else:
|
||||
save_subimgs(imgdict, track.slt_boxes, savepath_pipe_subimgs, ctype, featdict)
|
||||
|
||||
'''(3) 轨迹显示与保存'''
|
||||
illus = [None, None]
|
||||
for CamerType, vts in event_tracks:
|
||||
if len(vts.tracks)==0: continue
|
||||
|
||||
if CamerType == 'front':
|
||||
edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
|
||||
|
||||
h, w = edgeline.shape[:2]
|
||||
# nh, nw = h//2, w//2
|
||||
# edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
|
||||
|
||||
img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipe, CamerType, draw5p=True)
|
||||
illus[0] = img_tracking
|
||||
|
||||
plt = plot_frameID_y2(vts)
|
||||
plt.savefig(os.path.join(savepath_pipe, "front_y2.png"))
|
||||
|
||||
if CamerType == 'back':
|
||||
edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
|
||||
|
||||
h, w = edgeline.shape[:2]
|
||||
# nh, nw = h//2, w//2
|
||||
# edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
|
||||
|
||||
img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipe, CamerType, draw5p=True)
|
||||
illus[1] = img_tracking
|
||||
|
||||
illus = [im for im in illus if im is not None]
|
||||
if len(illus):
|
||||
img_cat = np.concatenate(illus, axis = 1)
|
||||
if len(illus)==2:
|
||||
H, W = img_cat.shape[:2]
|
||||
cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)
|
||||
|
||||
trajpath = os.path.join(savepath_pipe, "trajectory.png")
|
||||
cv2.imwrite(trajpath, img_cat)
|
||||
|
||||
|
||||
|
||||
|
||||
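For reference, a sketch with synthetic images (not part of the commit) of the side-by-side trajectory montage that show_result writes at the end: horizontal concatenation of the front and back renderings plus a vertical divider.

import cv2
import numpy as np

front = np.full((1280, 1024, 3), 40, dtype=np.uint8)   # placeholder trajectory images
back  = np.full((1280, 1024, 3), 80, dtype=np.uint8)

img_cat = np.concatenate([front, back], axis=1)
H, W = img_cat.shape[:2]
cv2.line(img_cat, (W // 2, 0), (W // 2, H), (128, 128, 255), 3)
cv2.imwrite("trajectory_demo.png", img_cat)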
def pipeline(eventpath,
|
||||
SourceType,
|
||||
weights,
|
||||
YoloVersion="V5"
|
||||
DataType = "raw", #raw, pkl: images or videos, pkl, pickle file
|
||||
YoloVersion="V5",
|
||||
savepath = None,
|
||||
saveimages = True
|
||||
):
|
||||
'''
|
||||
eventpath: 单个事件的存储路径
|
||||
|
||||
'''
|
||||
optdict = {}
|
||||
optdict["weights"] = weights
|
||||
|
||||
if SourceType == "video":
|
||||
vpaths = get_video_pairs(eventpath)
|
||||
elif SourceType == "image":
|
||||
vpaths = get_image_pairs(eventpath)
|
||||
event_tracks = []
|
||||
|
||||
## build the shopping-event dictionary
|
||||
evtname = Path(eventpath).stem
|
||||
barcode = evtname.split('_')[-1] if len(evtname.split('_'))>=2 \
|
||||
and len(evtname.split('_')[-1])>=8 \
|
||||
and evtname.split('_')[-1].isdigit() else ''
|
||||
'''事件结果存储文件夹'''
|
||||
|
||||
'''Result folders for the event: savepath_pipe, savepath_pkl'''
|
||||
if not savepath:
|
||||
savepath = Path(__file__).resolve().parents[0] / "events_result"
|
||||
|
||||
savepath_pipeline = Path(savepath) / Path("Yolos_Tracking") / evtname
|
||||
savepath_pipe = Path(savepath) / Path("yolos_tracking") / evtname
|
||||
|
||||
|
||||
"""ShoppingDict pickle 文件保存地址 """
|
||||
savepath_spdict = Path(savepath) / "ShoppingDict_pkfile"
|
||||
if not savepath_spdict.exists():
|
||||
savepath_spdict.mkdir(parents=True, exist_ok=True)
|
||||
pf_path = Path(savepath_spdict) / Path(str(evtname)+".pickle")
|
||||
savepath_pkl = Path(savepath) / "shopping_pkl"
|
||||
if not savepath_pkl.exists():
|
||||
savepath_pkl.mkdir(parents=True, exist_ok=True)
|
||||
pklpath = Path(savepath_pkl) / Path(str(evtname)+".pickle")
|
||||
|
||||
|
||||
|
||||
yrt_out = []
|
||||
if DataType == "raw":
|
||||
### skip events whose yolo-resnet-tracker output already exists
|
||||
if pklpath.exists():
|
||||
print(f"Pickle file already saved: {evtname}.pickle")
|
||||
return
|
||||
|
||||
if SourceType == "video":
|
||||
vpaths = get_video_pairs(eventpath)
|
||||
elif SourceType == "image":
|
||||
vpaths = get_image_pairs(eventpath)
|
||||
|
||||
|
||||
|
||||
for vpath in vpaths:
|
||||
'''================= 2. Result folders for the event ================='''
|
||||
|
||||
|
||||
if isinstance(vpath, list):
|
||||
savepath_pipe_imgs = savepath_pipe / Path("images")
|
||||
else:
|
||||
savepath_pipe_imgs = savepath_pipe / Path(str(Path(vpath).stem))
|
||||
|
||||
if not savepath_pipe_imgs.exists():
|
||||
savepath_pipe_imgs.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
optdict = {}
|
||||
optdict["weights"] = weights
|
||||
optdict["source"] = vpath
|
||||
optdict["save_dir"] = savepath_pipe_imgs
|
||||
optdict["is_save_img"] = saveimages
|
||||
optdict["is_save_video"] = True
|
||||
|
||||
|
||||
if YoloVersion == "V5":
|
||||
yrtOut = yolo_resnet_tracker(**optdict)
|
||||
elif YoloVersion == "V10":
|
||||
yrtOut = yolov10_resnet_tracker(**optdict)
|
||||
|
||||
yrt_out.append((vpath, yrtOut))
|
||||
|
||||
elif DataType == "pkl":
|
||||
pass
|
||||
|
||||
else:
|
||||
return
|
||||
|
||||
|
||||
# if pf_path.exists():
|
||||
# print(f"Pickle file have saved: {evtname}.pickle")
|
||||
# return
|
||||
|
||||
'''====================== Build the ShoppingDict ======================='''
|
||||
ShoppingDict = {"eventPath": eventpath,
|
||||
@ -112,16 +222,14 @@ def pipeline(
|
||||
"backCamera": {},
|
||||
"one2n": [] #
|
||||
}
|
||||
yrtDict = {}
|
||||
|
||||
|
||||
procpath = Path(eventpath).joinpath('process.data')
|
||||
if procpath.is_file():
|
||||
SimiDict = read_similar(procpath)
|
||||
ShoppingDict["one2n"] = SimiDict['one2n']
|
||||
|
||||
|
||||
for vpath in vpaths:
|
||||
yrtDict = {}
|
||||
event_tracks = []
|
||||
for vpath, yrtOut in yrt_out:
|
||||
'''================= 1. Build the camera-event dictionary ================='''
|
||||
CameraEvent = {"cameraType": '', # "front", "back"
|
||||
"videoPath": '',
|
||||
@ -141,33 +249,9 @@ def pipeline(
|
||||
if bname.split('_')[0] == "1" or bname.find('front')>=0:
|
||||
CameraEvent["cameraType"] = "front"
|
||||
|
||||
'''================= 2. 事件结果存储文件夹 ================='''
|
||||
if isinstance(vpath, list):
|
||||
savepath_pipeline_imgs = savepath_pipeline / Path("images")
|
||||
else:
|
||||
savepath_pipeline_imgs = savepath_pipeline / Path(str(Path(vpath).stem))
|
||||
|
||||
if not savepath_pipeline_imgs.exists():
|
||||
savepath_pipeline_imgs.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
savepath_pipeline_subimgs = savepath_pipeline / Path("subimgs")
|
||||
if not savepath_pipeline_subimgs.exists():
|
||||
savepath_pipeline_subimgs.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
'''================= 3. Yolo + Resnet + Tracker ================='''
|
||||
optdict["source"] = vpath
|
||||
optdict["save_dir"] = savepath_pipeline_imgs
|
||||
optdict["is_save_img"] = True
|
||||
optdict["is_save_video"] = True
|
||||
|
||||
|
||||
if YoloVersion == "V5":
|
||||
yrtOut = yolo_resnet_tracker(**optdict)
|
||||
elif YoloVersion == "V10":
|
||||
yrtOut = yolov10_resnet_tracker(**optdict)
|
||||
|
||||
|
||||
'''Two saving modes: (1) do not save sub-images, (2) save images'''
|
||||
###(1) save images
|
||||
yrtOut_save = []
|
||||
for frdict in yrtOut:
|
||||
fr_dict = {}
|
||||
@ -177,6 +261,7 @@ def pipeline(
|
||||
yrtOut_save.append(fr_dict)
|
||||
CameraEvent["yoloResnetTracker"] = yrtOut_save
|
||||
|
||||
###(2) no save images
|
||||
# CameraEvent["yoloResnetTracker"] = yrtOut
|
||||
|
||||
'''================= 4. tracking ================='''
|
||||
@ -219,108 +304,58 @@ def pipeline(
|
||||
yrtDict["frontyrt"] = yrtOut
|
||||
|
||||
'''========================== 保存模块 ================================='''
|
||||
'''(1) 保存 ShoppingDict 事件'''
|
||||
with open(str(pf_path), 'wb') as f:
|
||||
# save the ShoppingDict
|
||||
with open(str(pklpath), 'wb') as f:
|
||||
pickle.dump(ShoppingDict, f)
|
||||
|
||||
'''(2) 保存 Tracking 输出的运动轨迹子图,并记录相似度'''
|
||||
for CamerType, vts in event_tracks:
|
||||
if len(vts.tracks)==0: continue
|
||||
if CamerType == 'front':
|
||||
# yolos = ShoppingDict["frontCamera"]["yoloResnetTracker"]
|
||||
# draw and save the trajectory figure
|
||||
show_result(event_tracks, yrtDict, savepath_pipe)
|
||||
|
||||
yolos = yrtDict["frontyrt"]
|
||||
ctype = 1
|
||||
if CamerType == 'back':
|
||||
# yolos = ShoppingDict["backCamera"]["yoloResnetTracker"]
|
||||
|
||||
yolos = yrtDict["backyrt"]
|
||||
ctype = 0
|
||||
|
||||
imgdict, featdict, simidict = {}, {}, {}
|
||||
for y in yolos:
|
||||
imgdict.update(y["imgs"])
|
||||
featdict.update(y["feats"])
|
||||
simidict.update(y["featsimi"])
|
||||
|
||||
for track in vts.Residual:
|
||||
if isinstance(track, np.ndarray):
|
||||
save_subimgs(imgdict, track, savepath_pipeline_subimgs, ctype, featdict)
|
||||
else:
|
||||
save_subimgs(imgdict, track.slt_boxes, savepath_pipeline_subimgs, ctype, featdict)
|
||||
|
||||
'''(3) 轨迹显示与保存'''
|
||||
illus = [None, None]
|
||||
for CamerType, vts in event_tracks:
|
||||
if len(vts.tracks)==0: continue
|
||||
|
||||
if CamerType == 'front':
|
||||
edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
|
||||
|
||||
h, w = edgeline.shape[:2]
|
||||
# nh, nw = h//2, w//2
|
||||
# edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
|
||||
|
||||
img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
|
||||
illus[0] = img_tracking
|
||||
|
||||
plt = plot_frameID_y2(vts)
|
||||
plt.savefig(os.path.join(savepath_pipeline, "front_y2.png"))
|
||||
|
||||
if CamerType == 'back':
|
||||
edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
|
||||
|
||||
h, w = edgeline.shape[:2]
|
||||
# nh, nw = h//2, w//2
|
||||
# edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)
|
||||
|
||||
img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
|
||||
illus[1] = img_tracking
|
||||
|
||||
illus = [im for im in illus if im is not None]
|
||||
if len(illus):
|
||||
img_cat = np.concatenate(illus, axis = 1)
|
||||
if len(illus)==2:
|
||||
H, W = img_cat.shape[:2]
|
||||
cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)
|
||||
|
||||
trajpath = os.path.join(savepath_pipeline, "trajectory.png")
|
||||
cv2.imwrite(trajpath, img_cat)
|
||||
|
||||
def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
|
||||
source_type = "video", # video, image,
|
||||
DataType = "raw", # raw, pkl
|
||||
save_path = r"D:\work\result_pipeline",
|
||||
kk=1,
|
||||
source_type = "video", # video, image,
|
||||
yolo_ver = "V10", # V10, V5
|
||||
|
||||
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
|
||||
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
|
||||
k=0
|
||||
saveimages = True
|
||||
):
|
||||
'''
|
||||
Run pipeline() over the event directory; each sub-folder is one event.
|
||||
'''
|
||||
parmDict = {}
|
||||
parmDict["SourceType"] = source_type
|
||||
parmDict["DataType"] = DataType
|
||||
parmDict["savepath"] = save_path
|
||||
parmDict["SourceType"] = source_type
|
||||
|
||||
parmDict["YoloVersion"] = yolo_ver
|
||||
if parmDict["YoloVersion"] == "V5":
|
||||
parmDict["weights"] = weight_yolo_v5
|
||||
elif parmDict["YoloVersion"] == "V10":
|
||||
parmDict["weights"] = weight_yolo_v10
|
||||
|
||||
parmDict["saveimages"] = saveimages
|
||||
|
||||
|
||||
evtdir = Path(evtdir)
|
||||
errEvents = []
|
||||
k = 0
|
||||
for item in evtdir.iterdir():
|
||||
if item.is_dir():
|
||||
item = evtdir/Path("20250310-175352-741")
|
||||
# item = evtdir/Path("20241212-171505-f0afe929-fdfe-4efa-94d0-2fa748d65fbb_6907992518930")
|
||||
parmDict["eventpath"] = item
|
||||
pipeline(**parmDict)
|
||||
|
||||
# try:
|
||||
# pipeline(**parmDict)
|
||||
# except Exception as e:
|
||||
# errEvents.append(str(item))
|
||||
|
||||
k+=1
|
||||
if k==1:
|
||||
if kk is not None and k==kk:
|
||||
break
|
||||
|
||||
errfile = os.path.join(parmDict["savepath"], 'error_events.txt')
|
||||
@ -329,12 +364,20 @@ def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
|
||||
f.write(line + '\n')
|
||||
|
||||
if __name__ == "__main__":
|
||||
execute_pipeline()
|
||||
datapath = r'/home/wqg/dataset/test_dataset/base_dataset/single_event/source/'
|
||||
savepath = r'/home/wqg/dataset/pipeline/test_result/single_event_V10'
|
||||
|
||||
execute_pipeline(evtdir = datapath,
|
||||
DataType = "raw", # raw, pkl
|
||||
kk=1,
|
||||
source_type = "video", # video, image,
|
||||
save_path = savepath,
|
||||
yolo_ver = "V10", # V10, V5
|
||||
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
|
||||
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
|
||||
saveimages = False
|
||||
)
|
||||
|
||||
# spath_v10 = r"D:\work\result_pipeline_v10"
|
||||
# spath_v5 = r"D:\work\result_pipeline_v5"
|
||||
# execute_pipeline(save_path=spath_v10, yolo_ver="V10")
|
||||
# execute_pipeline(save_path=spath_v5, yolo_ver="V5")
|
||||
|
||||
|
||||
|
||||
|
BIN
realtime/__pycache__/event_time_specify.cpython-312.pyc
Normal file
Binary file not shown.
BIN
realtime/__pycache__/intrude_detect.cpython-312.pyc
Normal file
Binary file not shown.
@ -123,25 +123,28 @@ def devide_motion_state(tboxes, width):
|
||||
|
||||
'''
|
||||
|
||||
periods = []
|
||||
if len(tboxes) < width:
|
||||
return periods
|
||||
|
||||
fboxes, frameTstamp = array2frame(tboxes)
|
||||
|
||||
fnum = len(frameTstamp)
|
||||
if fnum < width: return periods
|
||||
|
||||
state = np.zeros((fnum, 2), dtype=np.int64)
|
||||
frameState = np.concatenate((frameTstamp, state), axis = 1).astype(np.int64)
|
||||
handState = np.concatenate((frameTstamp, state), axis = 1).astype(np.int64)
|
||||
|
||||
|
||||
|
||||
if fnum < width:
|
||||
return frameState, handState
|
||||
|
||||
mtrackFid = {}
|
||||
handFid = {}
|
||||
'''frameState marks the cart state judged from images: 0 = static, 1 = moving'''
|
||||
for idx in range(width, fnum+1):
|
||||
idx0 = idx-width
|
||||
|
||||
# if idx == 40:
|
||||
# print("123")
|
||||
|
||||
lboxes = np.concatenate(fboxes[idx0:idx], axis = 0)
|
||||
md = MoveDetect(lboxes)
|
||||
md.classify()
|
||||
|
420
realtime/intrude_detect.py
Normal file
@ -0,0 +1,420 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Apr 8 10:07:17 2025
|
||||
|
||||
@author: wqg
|
||||
"""
|
||||
|
||||
import csv
|
||||
import os
|
||||
import platform
|
||||
import sys
|
||||
import pickle
|
||||
import cv2
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
import matplotlib.pyplot as plt
|
||||
from typing import List, Tuple
|
||||
from scipy.spatial.distance import cdist
|
||||
from scipy.spatial import ConvexHull
|
||||
from shapely.geometry import Point, Polygon
|
||||
|
||||
##################################################### for method: run_yrt()
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1]
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.insert(0, str(ROOT))
|
||||
|
||||
from track_reid import yolov10_resnet_tracker
|
||||
from event_time_specify import devide_motion_state
|
||||
|
||||
|
||||
def cross(o: Tuple[float, float], a: Tuple[float, float], b: Tuple[float, float]) -> float:
|
||||
""" 计算向量 OA × OB 的叉积 """
|
||||
return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
|
||||
|
||||
def compute_convex_hull(points: List[Tuple[float, float]]) -> List[Tuple[float, float]]:
|
||||
""" 使用 Andrew's Monotone Chain 算法求二维点集的凸包 """
|
||||
points = sorted(set(points))  # sort and de-duplicate
|
||||
if len(points) <= 1:
|
||||
return points
|
||||
|
||||
lower = []
|
||||
for p in points:
|
||||
while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
|
||||
lower.pop()
|
||||
lower.append(p)
|
||||
|
||||
upper = []
|
||||
for p in reversed(points):
|
||||
while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
|
||||
upper.pop()
|
||||
upper.append(p)
|
||||
|
||||
# drop the duplicated joining points
|
||||
return lower[:-1] + upper[:-1]
|
||||
|
||||
def is_point_in_convex_hull(point: Tuple[float, float], hull: List[Tuple[float, float]]) -> bool:
|
||||
""" 判断一个点是否在凸包(含边界)内 """
|
||||
n = len(hull)
|
||||
if n < 3:
|
||||
# for a degenerate hull (a point or a segment), test collinearity / containment on the segment directly
|
||||
if n == 1:
|
||||
return point == hull[0]
|
||||
if n == 2:
|
||||
a, b = hull
|
||||
return abs(cross(a, b, point)) < 1e-10 and min(a[0], b[0]) <= point[0] <= max(a[0], b[0]) and min(a[1], b[1]) <= point[1] <= max(a[1], b[1])
|
||||
return False
|
||||
|
||||
for i in range(n):
|
||||
a = hull[i]
|
||||
b = hull[(i + 1) % n]
|
||||
if cross(a, b, point) < -1e-10:  # the point must lie on the left of, or on, every edge
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
|
||||
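Illustration only: the two geometry helpers above on a hand-checked axis-aligned square (the interior point (2, 1) is dropped from the hull).

square = [(0.0, 0.0), (4.0, 0.0), (4.0, 4.0), (0.0, 4.0), (2.0, 1.0)]
hull = compute_convex_hull(square)
print(hull)                                         # [(0,0), (4,0), (4,4), (0,4)], counter-clockwise
print(is_point_in_convex_hull((2.0, 2.0), hull))    # True  (inside)
print(is_point_in_convex_hull((5.0, 2.0), hull))    # False (outside)
print(is_point_in_convex_hull((0.0, 2.0), hull))    # True  (on an edge)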
def plot_convex_hull(points: List[Tuple[float, float]], hull: List[Tuple[float, float]], test_points: List[Tuple[float, float]] = None):
|
||||
x_all, y_all = zip(*points)
|
||||
fig, ax = plt.subplots()
|
||||
|
||||
ax.set_xlim(0, 1024)
|
||||
ax.set_ylim(1280, 0)
|
||||
|
||||
ax.plot(x_all, y_all, 'o', label='Points')
|
||||
|
||||
# close the hull polyline
|
||||
hull_loop = hull + [hull[0]]
|
||||
hx, hy = zip(*hull_loop)
|
||||
ax.plot(hx, hy, 'r-', linewidth=2, label='Convex Hull')
|
||||
|
||||
# optional test points
|
||||
if test_points:
|
||||
for pt in test_points:
|
||||
color = 'green' if is_point_in_convex_hull(pt, hull) else 'black'
|
||||
ax.plot(pt[0], pt[1], 's', color=color, markersize=8)
|
||||
ax.text(pt[0] + 0.05, pt[1], f'{pt}', fontsize=9)
|
||||
|
||||
ax.legend()
|
||||
ax.grid(True)
|
||||
plt.title("Convex Hull Visualization")
|
||||
plt.show()
|
||||
|
||||
|
||||
def convex_scipy():
|
||||
points = np.array([
|
||||
[0, 0],
|
||||
[2, 0],
|
||||
[1, 1],
|
||||
[2, 2],
|
||||
[0, 2],
|
||||
[1, 0.5]])
|
||||
hull = ConvexHull(points)
|
||||
|
||||
# 凸包顶点的索引
|
||||
print("凸包顶点索引:{}".format(hull.vertices))
|
||||
print("凸包顶点坐标:")
|
||||
for i in hull.vertices:
|
||||
print(points[i])
|
||||
|
||||
|
||||
# 将凸包坐标构造成 Polygon
|
||||
hull_points = points[hull.vertices]
|
||||
polygon = Polygon(hull_points)
|
||||
|
||||
# 判断一个点是否在凸包内
|
||||
p = Point(1, 1) # 示例点
|
||||
print("是否在凸包内:", polygon.contains(p)) # True or False
|
||||
|
||||
|
||||
def test_convex():
|
||||
# 测试数据
|
||||
sample_points = [(0, 0), (1, 1), (2, 2), (2, 0), (0, 2), (1, 0.5)]
|
||||
convex_hull = compute_convex_hull(sample_points)
|
||||
|
||||
# 测试点在凸包内
|
||||
test_point_inside = (1, 1)
|
||||
test_point_outside = (3, 3)
|
||||
test_point_on_edge = (1, 0)
|
||||
|
||||
inside = is_point_in_convex_hull(test_point_inside, convex_hull)
|
||||
outside = is_point_in_convex_hull(test_point_outside, convex_hull)
|
||||
on_edge = is_point_in_convex_hull(test_point_on_edge, convex_hull)
|
||||
|
||||
print(convex_hull, inside, outside, on_edge)
|
||||
|
||||
# 展示图像
|
||||
plot_convex_hull(sample_points, convex_hull, [test_point_inside, test_point_outside, test_point_on_edge])
|
||||
|
||||
def array2frame(tboxes):
|
||||
"tboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]"
|
||||
idx = np.where(tboxes[:, 6] != 0)[0]
|
||||
bboxes = tboxes[idx, :]
|
||||
frameID = np.sort(np.unique(bboxes[:, 7].astype(int)))
|
||||
fboxes = []
|
||||
for fid in frameID:
|
||||
idx = np.where(bboxes[:, 7] == fid)[0]
|
||||
box = bboxes[idx, :]
|
||||
fboxes.append(box)
|
||||
return fboxes
|
||||
|
||||
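Illustration only: array2frame regroups the stacked tracker rows [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index] into one array per frame, after dropping rows whose cls is 0 (presumably a non-goods class such as the hand).

import numpy as np

# columns: x1, y1, x2, y2, track_id, score, cls, frame_index, box_index
tboxes = np.array([
    [10, 10, 50, 50, 1, 0.9, 1, 1, 0],
    [12, 11, 52, 51, 1, 0.9, 1, 2, 0],
    [ 0,  0, 20, 20, 2, 0.8, 0, 2, 1],   # cls == 0: filtered out
    [14, 12, 54, 52, 1, 0.9, 1, 3, 0],
], dtype=np.float64)

fboxes = array2frame(tboxes)
print(len(fboxes), [len(fb) for fb in fboxes])   # 3 [1, 1, 1]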
def convex_based(tboxes, width, TH=40):
|
||||
fboxes = array2frame(tboxes)
|
||||
fnum = len(fboxes)
|
||||
|
||||
fids = np.array([i+1 for i in range(fnum)])[:, np.newaxis]
|
||||
state = np.zeros((fnum, 1), dtype=np.int64)
|
||||
frameState = np.concatenate((fids, state), axis = 1).astype(np.int64)
|
||||
if fnum < width:
|
||||
return frameState
|
||||
|
||||
for idx1 in range(width, fnum+1):
|
||||
idx0 = idx1 - width
|
||||
idx = idx1 - width//2 - 1
|
||||
|
||||
iboxes = fboxes[:idx]
|
||||
cboxes = fboxes[idx][:, 0:4]
|
||||
|
||||
cur_xy = np.zeros((len(cboxes), 2))
|
||||
cur_xy[:, 0] = (fboxes[idx][:, 0]+fboxes[idx][:, 2])/2
|
||||
cur_xy[:, 1] = (fboxes[idx][:, 1]+fboxes[idx][:, 3])/2
|
||||
for i in range(width//2):
|
||||
x1, y1, x2, y2 = iboxes[i][:, 0], iboxes[i][:, 1], iboxes[i][:, 2], iboxes[i][:, 3]
|
||||
|
||||
boxes = np.array([(x1, y1), (x1, y2), (x2, y1), (x2, y2)]).transpose(0, 2, 1).reshape(-1, 2)
|
||||
box1 = [(x, y) for x, y in boxes]
|
||||
convex_hull = compute_convex_hull(box1)
|
||||
|
||||
for pt in cur_xy:
|
||||
inside = is_point_in_convex_hull(pt, convex_hull)
|
||||
|
||||
if not inside:
|
||||
break
|
||||
if not inside:
|
||||
break
|
||||
|
||||
# Based on the distance between the four corners of the current frame boxes
|
||||
# and adjacent frame boxes
|
||||
iboxes = fboxes[idx0:idx] + fboxes[idx+1:idx1]
|
||||
cboxes = fboxes[idx][:, 0:4]
|
||||
cx1, cy1, cx2, cy2 = cboxes[:, 0], cboxes[:, 1], cboxes[:, 2], cboxes[:, 3]
|
||||
cxy = np.array([(cx1, cy1), (cx1, cy2), (cx2, cy1), (cx2, cy2)]).transpose(0, 2, 1).reshape(-1, 2)
|
||||
|
||||
iiboxes = np.concatenate(iboxes, axis=0)
|
||||
ix1, iy1, ix2, iy2 = iiboxes[:, 0], iiboxes[:, 1], iiboxes[:, 2], iiboxes[:, 3]
|
||||
ixy = np.array([(ix1, iy1), (ix1, iy2), (ix2, iy1), (ix2, iy2)]).transpose(0, 2, 1).reshape(-1, 2)
|
||||
|
||||
Dist = cdist(cxy, ixy).round(2)
|
||||
max_dist = np.max(np.min(Dist, axis=1))
|
||||
if max_dist > TH and not inside:
|
||||
frameState[idx, 1] = 1
|
||||
# plot_convex_hull(boxes, convex_hull, [pt])
|
||||
frameState[idx, 1] = 1
|
||||
|
||||
return frameState
|
||||
|
||||
|
||||
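To make the sliding-window indexing in convex_based (and in single_point below) concrete, an illustration only: each pass classifies the centre frame of the window, using the frames before and after it as the reference.

width, fnum = 5, 12
for idx1 in range(width, fnum + 1):
    idx0 = idx1 - width             # first frame of the window (inclusive)
    idx  = idx1 - width // 2 - 1    # centre frame that gets classified
    print(idx0, idx, idx1 - 1)      # 0 2 4, 1 3 5, ..., 7 9 11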
def single_point(tboxes, width, TH=60):
|
||||
"""width: window width, >=2"""
|
||||
|
||||
|
||||
fboxes = array2frame(tboxes)
|
||||
fnum = len(fboxes)
|
||||
|
||||
fids = np.array([i+1 for i in range(fnum)])[:, np.newaxis]
|
||||
state = np.zeros((fnum, 1), dtype=np.int64)
|
||||
frameState = np.concatenate((fids, state), axis = 1).astype(np.int64)
|
||||
|
||||
|
||||
if fnum < width:
|
||||
return frameState
|
||||
|
||||
for idx1 in range(width, fnum+1):
|
||||
idx0 = idx1 - width
|
||||
idx = idx1 - width//2 - 1
|
||||
|
||||
iboxe1 = fboxes[idx0:idx]
|
||||
iboxe2 = fboxes[idx+1:idx1]
|
||||
iboxes = fboxes[idx0:idx] + fboxes[idx+1:idx1]
|
||||
|
||||
cboxes = fboxes[idx][:, 0:4]
|
||||
cur_xy = np.zeros((len(cboxes), 2))
|
||||
cur_xy[:, 0] = (fboxes[idx][:, 0]+fboxes[idx][:, 2])/2
|
||||
cur_xy[:, 1] = (fboxes[idx][:, 1]+fboxes[idx][:, 3])/2
|
||||
Dist = np.empty((len(cboxes), 0))
|
||||
for i in range(width-1):
|
||||
boxes = iboxes[i][:, 0:4]
|
||||
|
||||
box_xy = np.zeros((len(boxes), 2))
|
||||
box_xy[:, 0] = (boxes[:, 0]+boxes[:, 2])/2
|
||||
box_xy[:, 1] = (boxes[:, 1]+boxes[:, 3])/2
|
||||
dist2 = cdist(cur_xy, box_xy).round(2)
|
||||
|
||||
Dist = np.concatenate((Dist, dist2), axis=1)
|
||||
|
||||
max_dist = np.max(np.min(Dist, axis=1))
|
||||
|
||||
if max_dist > TH:
|
||||
frameState[idx, 1] = 1
|
||||
|
||||
return frameState
|
||||
|
||||
|
||||
|
||||
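A hedged end-to-end sketch of single_point on synthetic tracker rows (make_frame is a hypothetical helper): one box per frame, with a sudden positional jump in frame 3, which is the only frame flagged as a single-frame intrusion.

import numpy as np

def make_frame(fid, cx, cy, half=20):
    # one 9-column tracker row: x1, y1, x2, y2, track_id, score, cls, frame_index, box_index
    return [cx - half, cy - half, cx + half, cy + half, 1, 0.9, 1, fid, 0]

rows = [make_frame(1, 100, 100),
        make_frame(2, 100, 100),
        make_frame(3, 300, 300),   # sudden jump away from the neighbouring frames
        make_frame(4, 100, 100),
        make_frame(5, 100, 100)]
tboxes = np.array(rows, dtype=np.float64)

fstate = single_point(tboxes, width=3, TH=60)
print(fstate[:, 1])   # [0 0 1 0 0]: only the jumping frame exceeds the 60-pixel threshold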
def intrude():
|
||||
pkpath = Path("/home/wqg/dataset/small-goods/pkfiles")
|
||||
savepath = Path("/home/wqg/dataset/small-goods/illustration_convex")
|
||||
|
||||
if not savepath.exists():
|
||||
savepath.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
err_trail, err_single, err_all = [], [], []
|
||||
num = 0
|
||||
for pth in pkpath.iterdir():
|
||||
# item = r"69042386_20250407-145737_front_returnGood_b82d28427666_15_17700000001.pickle"
|
||||
# pth = pkpath/item
|
||||
|
||||
with open(str(pth), 'rb') as f:
|
||||
yrt = pickle.load(f)
|
||||
|
||||
evtname = pth.stem
|
||||
|
||||
bboxes = []
|
||||
trackerboxes = np.empty((0, 10), dtype=np.float64)
|
||||
for frameDict in yrt:
|
||||
boxes = frameDict["bboxes"]
|
||||
tboxes = frameDict["tboxes"]
|
||||
tboxes = np.concatenate((tboxes, tboxes[:,7][:, None]), axis=1)
|
||||
|
||||
bboxes.append(boxes)
|
||||
|
||||
trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
|
||||
|
||||
'''single-points based for intrusion detection'''
|
||||
# wd =5
|
||||
# fstate1 = single_point(trackerboxes, wd)
|
||||
|
||||
'''convex-based '''
|
||||
width = 5
|
||||
fstate = convex_based(trackerboxes, width, TH=60)
|
||||
|
||||
# fstate = np.zeros(fstate1.shape)
|
||||
# fstate[:, 0] = fstate1[:, 0]
|
||||
# fstate[:, 1] = fstate1[:, 1] * fstate2[:, 1]
|
||||
|
||||
'''trajectory based for intrusion detection
|
||||
period: 0 1 2 3
|
||||
fid   timestamp(fid)   tid extension based on the sliding window   motion interval covered by the sliding window
|
||||
'''
|
||||
win_width = 12
|
||||
period, handState = devide_motion_state(trackerboxes, win_width)
|
||||
|
||||
num += 1
|
||||
if np.all(period[:,2:4]==0):
|
||||
err_trail.append(evtname)
|
||||
if np.all(fstate[:,1]==0):
|
||||
err_single.append(evtname)
|
||||
if np.all(period[:,2:4]==0) and np.all(fstate[:,1]==0):
|
||||
err_all.append(evtname)
|
||||
|
||||
fig, (ax1, ax2) = plt.subplots(2, 1)
|
||||
ax1.plot(period[:, 1], period[:, 2], 'bo-', linewidth=1, markersize=4)
|
||||
ax1.plot(period[:, 1], period[:, 3], 'rx-', linewidth=1, markersize=8)
|
||||
|
||||
ax2.plot(fstate[:, 0], fstate[:, 1], 'rx-', linewidth=1, markersize=8)
|
||||
plt.savefig(os.path.join(str(savepath), f"{evtname}.png"))
|
||||
|
||||
|
||||
plt.close()
|
||||
# if num==1:
|
||||
# break
|
||||
|
||||
rate_trail = 1 - len(err_trail)/num
|
||||
rate_single = 1 - len(err_single)/num
|
||||
rate_all = 1 - len(err_all)/num
|
||||
|
||||
print(f"rate_trail: {rate_trail}")
|
||||
print(f"rate_single: {rate_single}")
|
||||
print(f"rate_all: {rate_all}")
|
||||
|
||||
txtpath = savepath.parents[0] / "error.txt"
|
||||
with open(str(txtpath), "w") as f:
|
||||
f.write(f"rate_trail: {rate_trail}" + "\n")
|
||||
f.write(f"rate_single: {rate_single}" + "\n")
|
||||
f.write(f"rate_all: {rate_all}" + "\n")
|
||||
|
||||
f.write("\n" + "err_trail" + "\n")
|
||||
for line in err_trail:
|
||||
f.write(line + "\n")
|
||||
|
||||
f.write("\n" + "err_single" + "\n")
|
||||
for line in err_single:
|
||||
f.write(line + "\n")
|
||||
|
||||
f.write("\n" + "err_all" + "\n")
|
||||
for line in err_all:
|
||||
f.write(line + "\n")
|
||||
print("Done!")
|
||||
|
||||
|
||||
|
||||
|
||||
def run_yrt():
|
||||
datapath = Path("/home/wqg/dataset/small-goods/videos/")
|
||||
savepath = Path("/home/wqg/dataset/small-goods/result/")
|
||||
pkpath = Path("/home/wqg/dataset/small-goods/pkfiles/")
|
||||
|
||||
if not savepath.exists():
|
||||
savepath.mkdir(parents=True, exist_ok=True)
|
||||
if not pkpath.exists():
|
||||
pkpath.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
optdict = {}
|
||||
optdict["weights"] = ROOT / 'ckpts/best_v10s_width0375_1205.pt'
|
||||
optdict["is_save_img"] = False
|
||||
optdict["is_save_video"] = True
|
||||
|
||||
k = 0
|
||||
for pth in datapath.iterdir():
|
||||
item = "69042386_20250407-145819_back_returnGood_b82d28427666_15_17700000001.mp4"
|
||||
pth = pth.parents[0] /item
|
||||
|
||||
optdict["source"] = pth
|
||||
optdict["save_dir"] = savepath
|
||||
|
||||
# try:
|
||||
yrtOut = yolov10_resnet_tracker(**optdict)
|
||||
|
||||
pkpath_ = pkpath / f"{Path(pth).stem}.pickle"
|
||||
with open(str(pkpath_), 'wb') as f:
|
||||
pickle.dump(yrtOut, f)
|
||||
|
||||
k += 1
|
||||
if k==1:
|
||||
break
|
||||
# except Exception as e:
|
||||
# print("abc")
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# run_yrt()
|
||||
|
||||
intrude()
|
||||
|
||||
# test_convex()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -19,7 +19,18 @@ from collections import OrderedDict
|
||||
from event_time_specify import devide_motion_state #, state_measure
|
||||
|
||||
import sys
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[1] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
from imgs_inference import run_yolo
|
||||
from tracking.utils.read_data import read_weight_sensor
|
||||
|
||||
|
@ -128,6 +128,8 @@ def init_trackers(tracker_yaml = None, bs=1):
|
||||
"""
|
||||
# tracker_yaml = r"./tracking/trackers/cfg/botsort.yaml"
|
||||
|
||||
tracker_yaml = str(tracker_yaml)
|
||||
|
||||
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
|
||||
|
||||
cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
|
||||
@ -149,7 +151,7 @@ def yolov10_resnet_tracker(
|
||||
is_save_img = True,
|
||||
is_save_video = True,
|
||||
|
||||
tracker_yaml = "./tracking/trackers/cfg/botsort.yaml",
|
||||
tracker_yaml = ROOT / "tracking/trackers/cfg/botsort.yaml",
|
||||
line_thickness=3, # bounding box thickness (pixels)
|
||||
hide_labels=False, # hide labels
|
||||
):
|
||||
@ -157,7 +159,7 @@ def yolov10_resnet_tracker(
|
||||
## load a custom model
|
||||
model = YOLOv10(weights)
|
||||
|
||||
custom = {"conf": 0.25, "batch": 1, "save": False, "mode": "predict"}
|
||||
custom = {"conf": 0.1, "batch": 1, "save": False, "mode": "predict"}
|
||||
kwargs = {"save": True, "imgsz": 640, "conf": 0.1}
|
||||
args = {**model.overrides, **custom, **kwargs}
|
||||
predictor = model.task_map[model.task]["predictor"](overrides=args, _callbacks=model.callbacks)
|
||||
@ -294,7 +296,7 @@ def yolo_resnet_tracker(
|
||||
is_save_img = True,
|
||||
is_save_video = True,
|
||||
|
||||
tracker_yaml = "./tracking/trackers/cfg/botsort.yaml",
|
||||
tracker_yaml = ROOT / "tracking/trackers/cfg/botsort.yaml",
|
||||
imgsz=(640, 640), # inference size (height, width)
|
||||
conf_thres=0.25, # confidence threshold
|
||||
iou_thres=0.45, # NMS IOU threshold
|
||||
@ -359,6 +361,7 @@ def yolo_resnet_tracker(
|
||||
# Process predictions
|
||||
for i, det in enumerate(pred): # per image
|
||||
im0 = im0s.copy()
|
||||
|
||||
annotator = Annotator(im0.copy(), line_width=line_thickness, example=str(names))
|
||||
s += '%gx%g ' % im.shape[2:] # print string
|
||||
if len(det):
|
||||
@ -461,6 +464,7 @@ def yolo_resnet_tracker(
|
||||
else: # stream
|
||||
fps, w, h = 25, im0.shape[1], im0.shape[0]
|
||||
## for image rotating in dataloader.LoadImages.__next__()
|
||||
|
||||
w, h = im0.shape[1], im0.shape[0]
|
||||
|
||||
vdieo_path = str(Path(vdieo_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
|
||||
@ -484,7 +488,7 @@ def run(
|
||||
project=ROOT / 'runs/detect', # save results to project/name
|
||||
name='exp', # save results to project/name
|
||||
|
||||
tracker_yaml = "./tracking/trackers/cfg/botsort.yaml",
|
||||
tracker_yaml = ROOT / "tracking/trackers/cfg/botsort.yaml",
|
||||
imgsz=(640, 640), # inference size (height, width)
|
||||
conf_thres=0.25, # confidence threshold
|
||||
iou_thres=0.45, # NMS IOU threshold
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -7,9 +7,19 @@ Created on Mon Mar 4 18:36:31 2024
|
||||
import numpy as np
|
||||
import cv2
|
||||
import copy
|
||||
from tracking.utils.mergetrack import track_equal_track
|
||||
from scipy.spatial.distance import cdist
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
FILE = Path(__file__).resolve()
|
||||
ROOT = FILE.parents[2] # YOLOv5 root directory
|
||||
if str(ROOT) not in sys.path:
|
||||
sys.path.append(str(ROOT))
|
||||
|
||||
from tracking.utils.mergetrack import track_equal_track
|
||||
|
||||
|
||||
from scipy.spatial.distance import cdist
|
||||
|
||||
curpath = Path(__file__).resolve().parents[0]
|
||||
curpath = Path(curpath)
|
||||
parpath = curpath.parent
|
||||
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -11,49 +11,89 @@ import pickle
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
from scipy.spatial.distance import cdist
|
||||
import copy
|
||||
|
||||
from .dotrack.dotracks_back import doBackTracks
|
||||
from .dotrack.dotracks_front import doFrontTracks
|
||||
from .utils.drawtracks import plot_frameID_y2, draw_all_trajectories
|
||||
from .utils.read_data import read_similar
|
||||
from dotrack.dotracks_back import doBackTracks
|
||||
from dotrack.dotracks_front import doFrontTracks
|
||||
from utils.drawtracks import plot_frameID_y2, draw_all_trajectories
|
||||
from utils.read_data import read_similar
|
||||
|
||||
|
||||
def get_trail(ShoppingDict, ppath):
|
||||
|
||||
evtname = ShoppingDict["eventName"]
|
||||
|
||||
back_yrt = ShoppingDict["backCamera"]["yoloResnetTracker"]
|
||||
front_yrt = ShoppingDict["frontCamera"]["yoloResnetTracker"]
|
||||
|
||||
back_vts = ShoppingDict["frontCamera"]["tracking"]
|
||||
front_vts = ShoppingDict["backCamera"]["tracking"]
|
||||
|
||||
|
||||
class CameraEvent_:
|
||||
def __init__(self):
|
||||
self.cameraType = '', # "front", "back"
|
||||
self.videoPath = '',
|
||||
self.imagePaths = [],
|
||||
self.yoloResnetTracker =[],
|
||||
self.tracking = None,
|
||||
event_tracks = [("back", back_yrt, back_vts), ("front", front_yrt, front_vts)]
|
||||
|
||||
class ShoppingEvent_:
|
||||
def __init__(self):
|
||||
self.eventPath = ''
|
||||
self.eventName = ''
|
||||
self.barcode = ''
|
||||
self.eventType = '', # "input", "output", "other"
|
||||
self.frontCamera = None
|
||||
self.backCamera = None
|
||||
self.one2n = []
|
||||
|
||||
savepath = ppath / "alltrail"
|
||||
if not savepath.exists():
|
||||
savepath.mkdir()
|
||||
|
||||
savepath = str(savepath)
|
||||
evtime = evtname[:15]
|
||||
|
||||
illus = [None, None]
|
||||
for camera_type, yrtOut, vts in event_tracks:
|
||||
if len(vts.Residual)==1: continue
|
||||
|
||||
if camera_type == 'front':
|
||||
edgeline = cv2.imread("./shopcart/cart_tempt/board_ftmp_line.png")
|
||||
|
||||
img_tracking = draw_all_trajectories(vts, edgeline, savepath, camera_type, draw5p=False)
|
||||
illus[0] = img_tracking
|
||||
|
||||
plt = plot_frameID_y2(vts)
|
||||
plt.savefig(os.path.join(savepath, f"{evtime}_front.png"))
|
||||
|
||||
if camera_type == 'back':
|
||||
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
|
||||
|
||||
img_tracking = draw_all_trajectories(vts, edgeline, savepath, camera_type, draw5p=False)
|
||||
illus[1] = img_tracking
|
||||
|
||||
illus = [im for im in illus if im is not None]
|
||||
if len(illus):
|
||||
img_cat = np.concatenate(illus, axis = 1)
|
||||
if len(illus)==2:
|
||||
H, W = img_cat.shape[:2]
|
||||
cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)
|
||||
|
||||
trajpath = os.path.join(savepath, f"{evtime}.png")
|
||||
cv2.imwrite(trajpath, img_cat)
|
||||
|
||||
return evtime
|
||||
|
||||
return None
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def track_opt(ShoppingDict, ppath):
|
||||
'''
|
||||
Read a saved object and modify one of its attributes
|
||||
|
||||
'''
|
||||
evtname = ShoppingDict["eventName"]
|
||||
|
||||
shopping = copy.deepcopy(ShoppingDict)
|
||||
|
||||
evt_pkfile = 'path.pickle'
|
||||
with open(evt_pkfile, 'rb') as f:
|
||||
ShoppingDict = pickle.load(f)
|
||||
|
||||
savepath = ""
|
||||
## only need to init item: tracking for each Camera
|
||||
shopping["frontCamera"]["tracking"] = []
|
||||
shopping["backCamera"]["tracking"] = []
|
||||
|
||||
back_camera = ShoppingDict["backCamera"]["cameraType"]
|
||||
back_yrt = ShoppingDict["backCamera"]["yoloResnetTracker"]
|
||||
@ -61,120 +101,132 @@ def main():
    front_yrt = ShoppingDict["frontCamera"]["yoloResnetTracker"]
    yrts = [(back_camera, back_yrt), (front_camera, front_yrt)]

    shopping_event = ShoppingEvent_()
    shopping_event.eventPath = ShoppingDict["eventPath"]
    shopping_event.eventName = ShoppingDict["eventName"]
    shopping_event.barcode = ShoppingDict["barcode"]

    yrtDict = {}
    event_tracks = []
    errtrail = ''
    for camera_type, yrtOut in yrts:
        '''
        inputs:
            yrtOut
            camera_type
        outputs:
            CameraEvent
        '''
        camera_event = CameraEvent_()

        '''================= 4. tracking ================='''
        '''================= 1. tracking ================='''
        '''(1) Build the boxes and feats consumed by the tracking module'''
        bboxes = np.empty((0, 6), dtype=np.float64)
        # bboxes = np.empty((0, 6), dtype=np.float64)
        trackerboxes = np.empty((0, 9), dtype=np.float64)
        trackefeats = {}
        for frameDict in yrtOut:
            tboxes = frameDict["tboxes"]
            ffeats = frameDict["feats"]

            boxes = frameDict["bboxes"]
            bboxes = np.concatenate((bboxes, np.array(boxes)), axis=0)
            # boxes = frameDict["bboxes"]
            # bboxes = np.concatenate((bboxes, np.array(boxes)), axis=0)
            trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)
            for i in range(len(tboxes)):
                fid, bid = int(tboxes[i, 7]), int(tboxes[i, 8])
                trackefeats.update({f"{fid}_{bid}": ffeats[f"{fid}_{bid}"]})
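        # Note (assumption based on the loop above): each frameDict in yoloResnetTracker is a
        # per-frame dict whose "tboxes" rows carry the frame id in column 7 and the box id in
        # column 8; the matching ReID feature is stored under the key f"{fid}_{bid}", which is
        # how doBackTracks()/doFrontTracks() can re-associate features with tracker boxes.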

        '''(2) tracking, back camera'''
        if CameraEvent["cameraType"] == "back":
        if camera_type == "back":
            vts = doBackTracks(trackerboxes, trackefeats)
            vts.classify()
            event_tracks.append(("back", vts))
            shopping["backCamera"]["tracking"] = vts

            if len(vts.Residual)!=1:
                errtrail = evtname

            camera_event.camera_type = camera_type
            camera_event.yoloResnetTracker = yrtOut
            camera_event.tracking = vts
            camera_event.videoPath = ShoppingDict["backCamera"]["videoPath"]
            camera_event.imagePaths = ShoppingDict["backCamera"]["imagePaths"]
            shopping_event.backCamera = camera_event

            yrtDict["backyrt"] = yrtOut

        '''(2) tracking, front camera'''
        if CameraEvent["cameraType"] == "front":
        '''(3) tracking, front camera'''
        if camera_type == "front":
            vts = doFrontTracks(trackerboxes, trackefeats)
            vts.classify()
            event_tracks.append(("front", vts))
            shopping["frontCamera"]["tracking"] = vts

            camera_event.camera_type = camera_type
            camera_event.yoloResnetTracker = yrtOut
            camera_event.tracking = vts
            camera_event.videoPath = ShoppingDict["frontCamera"]["videoPath"]
            camera_event.imagePaths = ShoppingDict["frontCamera"]["imagePaths"]
            shopping_event.frontCamera = camera_event
            if len(vts.Residual)!=1:
                errtrail = evtname

            yrtDict["frontyrt"] = yrtOut
        event_tracks.append((camera_type, yrtOut, vts))
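        # event_tracks now stores 3-tuples (camera_type, yrtOut, vts) so that the drawing loop
        # below (and get_trail) can unpack the per-frame YOLO/ReID output alongside the tracks.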

    name = Path(evt_pkfile).stem
    pf_path = os.path.join(savepath, name+"_new.pickle")
    with open(str(pf_path), 'wb') as f:
        pickle.dump(shopping_event, f)
    pckpath = ppath / "track_optim"
    if not pckpath.exists():
        pckpath.mkdir()

    fpath = pckpath / "{}_new.pickle".format(evtname)
    with open(str(fpath), 'wb') as f:
        pickle.dump(shopping, f)

    savepath = ppath / "yolos_tracking" / evtname

    illus = [None, None]
    for CamerType, vts in event_tracks:
    for camera_type, yrtOut, vts in event_tracks:
        if len(vts.tracks)==0: continue

        if CamerType == 'front':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
        if camera_type == 'front':
            edgeline = cv2.imread("./shopcart/cart_tempt/board_ftmp_line.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            img_tracking = draw_all_trajectories(vts, edgeline, savepath, camera_type, draw5p=False)
            illus[0] = img_tracking

            plt = plot_frameID_y2(vts)
            plt.savefig(os.path.join(savepath_pipeline, "front_y2.png"))
            plt.savefig(os.path.join(savepath, "front_y2_new.png"))

        if CamerType == 'back':
            edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
        if camera_type == 'back':
            edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")

            h, w = edgeline.shape[:2]
            # nh, nw = h//2, w//2
            # edgeline = cv2.resize(edgeline, (nw, nh), interpolation=cv2.INTER_AREA)

            img_tracking = draw_all_trajectories(vts, edgeline, savepath_pipeline, CamerType, draw5p=True)
            img_tracking = draw_all_trajectories(vts, edgeline, savepath, camera_type, draw5p=False)
            illus[1] = img_tracking

    illus = [im for im in illus if im is not None]
    if len(illus):
        img_cat = np.concatenate(illus, axis = 1)
        if len(illus)==2:
            H, W = img_cat.shape[:2]
            cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)

        trajpath = os.path.join(savepath, "trajectory_new.png")
        cv2.imwrite(trajpath, img_cat)

    return errtrail


def main():
    # evttypes = ["single_event_V10", "single_event_V5", "performence_V10", "performence_V5"]
    evttypes = ["single_event_V10"]

    k = 0
    error_trail = []
    for evttype in evttypes:
        ppath = Path("/home/wqg/dataset/pipeline/yrt/{}".format(evttype))

        pkpath = ppath / "shopping_pkl"
        for fp in pkpath.iterdir():
            # fp = pkpath / "{}.pickle".format("20250305-152917-635_6970209860221_6970209860221")
            print(fp)

            if fp.suffix != '.pickle': continue
            with open(str(fp), 'rb') as f:
                ShoppingDict = pickle.load(f)

            # errtrail = track_opt(ShoppingDict, ppath)
            # error_trail.append(errtrail)

            errtrail = get_trail(ShoppingDict, ppath)
            if errtrail is not None:
                error_trail.append(errtrail)

            # k+=1
            # if k==100:
            #     break

        errfile = ppath / 'error_trail.txt'
        with open(errfile, 'w', encoding='utf-8') as f:
            for line in error_trail:
                f.write(line + '\n')


if __name__ == "__main__":
    main()
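
For context, a minimal sketch of how the re-tracked "*_new.pickle" files written by track_opt could be compared against the original events. compare_residuals and its arguments are hypothetical and not part of this commit; the dict layout is assumed to match the ShoppingDict structure used above.

import pickle

def compare_residuals(orig_pkl, new_pkl):
    # Hypothetical helper: report how the residual-track count changed per camera.
    with open(str(orig_pkl), 'rb') as f:
        orig = pickle.load(f)
    with open(str(new_pkl), 'rb') as f:
        new = pickle.load(f)
    for cam in ("backCamera", "frontCamera"):
        old_vts = orig[cam]["tracking"]
        new_vts = new[cam]["tracking"]
        n_old = len(old_vts.Residual) if old_vts else 0
        n_new = len(new_vts.Residual) if new_vts else 0
        print("{} {}: residual {} -> {}".format(orig["eventName"], cam, n_old, n_new))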
Binary file not shown.
@ -19,6 +19,8 @@ from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
from ultralytics.utils import LOGGER, is_colab, is_kaggle, ops
from ultralytics.utils.checks import check_requirements

import subprocess
import json

@dataclass
class SourceTypes:
@ -340,6 +342,12 @@ class LoadImagesAndVideos:
                if success:
                    success, im0 = self.cap.retrieve()
                    ##======================
                    '''Check whether the video carries rotation metadata'''
                    rotation = self.get_rotation(path)
                    if rotation == 270:
                        im0 = cv2.rotate(im0, cv2.ROTATE_90_COUNTERCLOCKWISE)
                    ###======================
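                    # Note: only the 270-degree tag is corrected above. A fuller (hypothetical)
                    # mapping would also handle 90 and 180, e.g.:
                    #   _ROT = {90: cv2.ROTATE_90_CLOCKWISE,
                    #           180: cv2.ROTATE_180,
                    #           270: cv2.ROTATE_90_COUNTERCLOCKWISE}
                    #   if rotation in _ROT:
                    #       im0 = cv2.rotate(im0, _ROT[rotation])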
                    if success:
                        self.frame += 1
                        paths.append(path)
@ -355,6 +363,7 @@ class LoadImagesAndVideos:
                            self.cap.release()
                        if self.count < self.nf:
                            self._new_video(self.files[self.count])

            else:
                self.mode = "image"
                im0 = cv2.imread(path)  # BGR
@ -378,6 +387,23 @@ class LoadImagesAndVideos:
            raise FileNotFoundError(f"Failed to open video {path}")
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)

    def get_rotation(self, filename):
        cmd = [
            "ffprobe",  # note: ffprobe, not ffmpeg
            "-v", "error",
            "-select_streams", "v:0",
            "-show_entries", "stream_tags=rotate",
            "-of", "json",
            filename
        ]
        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode == 0:
            metadata = json.loads(result.stdout)
            rotation = metadata.get("streams", [{}])[0].get("tags", {}).get("rotate", 0)
            return int(rotation)
        else:
            return 0
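        # Assumption: ffprobe (shipped with FFmpeg) is available on PATH. If it is missing, or
        # the rotation is stored as display-matrix side data rather than a "rotate" stream tag
        # (common for videos remuxed by newer FFmpeg builds), this helper returns 0 and no
        # rotation correction is applied upstream.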

    def __len__(self):
        """Returns the number of batches in the object."""
        return math.ceil(self.nf / self.bs)  # number of files
Binary file not shown.
BIN
ultralytics/engine/__pycache__/exporter.cpython-312.pyc
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.