last update in 2024

This commit is contained in:
王庆刚
2024-12-31 16:45:04 +08:00
parent dac3b3f2b6
commit 7e13e0f5b4
20 changed files with 1349 additions and 389 deletions

Binary file not shown.

View File

@ -6,10 +6,39 @@ Created on Mon Dec 16 18:56:18 2024
"""
import os
import cv2
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from scipy.spatial.distance import cdist
from utils.event import ShoppingEvent, save_data
def main():
rcParams['font.sans-serif'] = ['SimHei'] # 用黑体显示中文
rcParams['axes.unicode_minus'] = False # 正确显示负号
'''*********** USearch ***********'''
def read_usearch():
stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
stdBarcode = []
stdlib = {}
with open(stdFeaturePath, 'r', encoding='utf-8') as f:
data = json.load(f)
for dic in data['total']:
barcode = dic['key']
feature = np.array(dic['value'])
stdBarcode.append(barcode)
stdlib[barcode] = feature
return stdlib
def get_eventlist():
'''
Read the error events recorded in one test run
'''
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
text1 = "one2n_Error.txt"
text2 = "one2SN_Error.txt"
@ -24,9 +53,16 @@ def main():
if line:
fpath=os.path.join(evtpaths, line)
events.append(fpath)
events = list(set(events))
return events
def single_event():
events = get_eventlist()
'''Define the storage path for the current events and create the corresponding folders'''
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
@ -35,6 +71,291 @@ def main():
save_data(event, resultPath)
print(event.evtname)
def get_topk_percent(data, k):
"""
Get the largest k% of elements in the data
"""
# convert the input data to a NumPy array
if isinstance(data, list):
data = np.array(data)
percentile = np.percentile(data, 100-k)
top_k_percent = data[data >= percentile]
return top_k_percent
def cluster(data, thresh=0.15):
# data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
# data = np.array([0.1, 0.13, 0.2, 0.3])
# data = np.array([0.1])
if isinstance(data, list):
data = np.array(data)
data1 = np.sort(data)
cluter, Cluters = [data1[0]], []
for i in range(1, len(data1)):
if data1[i] - data1[i-1]< thresh:
cluter.append(data1[i])
else:
Cluters.append(cluter)
cluter = [data1[i]]
Cluters.append(cluter)
clt_center = []
for clt in Cluters:
## Should a minimum number of track samples per cluster be enforced here? That constraint belongs in the track-analysis stage
# if len(clt)>=3:
# clt_center.append(np.mean(clt))
clt_center.append(np.mean(clt))
# print(clt_center)
return clt_center
def calc_simil(event, stdfeat):
def calsiml(feat1, feat2):
'''Selection strategy for the similarity between track samples and the standard feature set'''
matrix = 1 - cdist(feat1, feat2, 'cosine')
simi_max = []
for i in range(len(matrix)):
sim = np.mean(get_topk_percent(matrix[i, :], 75))
simi_max.append(sim)
cltc_max = cluster(simi_max)
Simi = max(cltc_max)
## an empty cltc_max indicates a logic oversight and should be investigated
# if len(cltc_max):
# Simi = max(cltc_max)
# else:
# Simi = 0 #不应该走到该处
return Simi
front_boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
front_feats = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
for i in range(len(event.front_boxes)):
front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
back_boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
back_feats = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
for i in range(len(event.back_boxes)):
back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
if len(front_feats):
front_simi = calsiml(front_feats, stdfeat)
if len(back_feats):
back_simi = calsiml(back_feats, stdfeat)
'''Fusion strategy for the front/back camera similarities'''
if len(front_feats) and len(back_feats):
diff_simi = abs(front_simi - back_simi)
if diff_simi>0.15:
Similar = max([front_simi, back_simi])
else:
Similar = (front_simi+back_simi)/2
elif len(front_feats) and len(back_feats)==0:
Similar = front_simi
elif len(front_feats)==0 and len(back_feats):
Similar = back_simi
else:
Similar = None  # both event.front_feats and event.back_feats are empty
return Similar
def simi_matrix():
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
stdlib = read_usearch()
events = get_eventlist()
for evtpath in events:
evtname = os.path.basename(evtpath)
_, barcode = evtname.split("_")
# 生成事件与相应标准特征集
event = ShoppingEvent(evtpath)
stdfeat = stdlib[barcode]
Similar = calc_simil(event, stdfeat)
# 构造 boxes 子图存储路径
subimgpath = os.path.join(resultPath, f"{event.evtname}", "subimg")
if not os.path.exists(subimgpath):
os.makedirs(subimgpath)
histpath = os.path.join(resultPath, "simi_hist")
if not os.path.exists(histpath):
os.makedirs(histpath)
mean_values, max_values = [], []
cameras = ('front', 'back')
fig, ax = plt.subplots(2, 3, figsize=(16, 9), dpi=100)
kpercent = 25
for camera in cameras:
boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
evtfeat = np.empty((0, 256), dtype=np.float64) ##和类doTracks兼容
if camera == 'front':
for i in range(len(event.front_boxes)):
boxes = np.concatenate((boxes, event.front_boxes[i]), axis=0)
evtfeat = np.concatenate((evtfeat, event.front_feats[i]), axis=0)
imgpaths = event.front_imgpaths
else:
for i in range(len(event.back_boxes)):
boxes = np.concatenate((boxes, event.back_boxes[i]), axis=0)
evtfeat = np.concatenate((evtfeat, event.back_feats[i]), axis=0)
imgpaths = event.back_imgpaths
assert len(boxes)==len(evtfeat), f"Please check the Event: {evtname}"
if len(boxes)==0: continue
print(evtname)
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
simi_1d = matrix.flatten()
simi_mean = np.mean(matrix, axis=1)
# simi_max = np.max(matrix, axis=1)
'''Average the largest k% of similarities in each row of the similarity matrix'''
simi_max = []
for i in range(len(matrix)):
sim = np.mean(get_topk_percent(matrix[i, :], kpercent))
simi_max.append(sim)
mean_values.append(np.mean(matrix))
max_values.append(np.mean(simi_max))
diff_max_mean = np.mean(simi_max) - np.mean(matrix)
'''相似度统计特性图示'''
k =0
if camera == 'front': k = 1
'''********************* 相似度全体数据 *********************'''
ax[k, 0].hist(simi_1d, bins=60, range=(-0.2, 1), edgecolor='black')
ax[k, 0].set_xlim([-0.2, 1])
ax[k, 0].set_title(camera)
_, y_max = ax[k, 0].get_ylim() # 获取y轴范围
'''相似度变动范围'''
ax[k, 0].text(-0.1, 0.15*y_max, f"rng:{max(simi_1d)-min(simi_1d):.3f}", fontsize=18, color='b')
'''********************* 均值********************************'''
ax[k, 1].hist(simi_mean, bins=24, range=(-0.2, 1), edgecolor='black')
ax[k, 1].set_xlim([-0.2, 1])
ax[k, 1].set_title("mean")
_, y_max = ax[k, 1].get_ylim() # 获取y轴范围
'''相似度变动范围'''
ax[k, 1].text(-0.1, 0.15*y_max, f"rng:{max(simi_mean)-min(simi_mean):.3f}", fontsize=18, color='b')
'''********************* 最大值 ******************************'''
ax[k, 2].hist(simi_max, bins=24, range=(-0.2, 1), edgecolor='black')
ax[k, 2].set_xlim([-0.2, 1])
ax[k, 2].set_title("max")
_, y_max = ax[k, 2].get_ylim() # 获取y轴范围
'''相似度变动范围'''
ax[k, 2].text(-0.1, 0.15*y_max, f"rng:{max(simi_max)-min(simi_max):.3f}", fontsize=18, color='b')
'''绘制聚类中心'''
cltc_mean = cluster(simi_mean)
for value in cltc_mean:
ax[k, 1].axvline(x=value, color='m', linestyle='--', linewidth=3)
cltc_max = cluster(simi_max)
for value in cltc_max:
ax[k, 2].axvline(x=value, color='m', linestyle='--', linewidth=3)
'''绘制相似度均值与最大值均值'''
ax[k, 1].axvline(x=np.mean(matrix), color='r', linestyle='-', linewidth=3)
ax[k, 2].axvline(x=np.mean(simi_max), color='g', linestyle='-', linewidth=3)
'''绘制相似度最大值均值 - 均值'''
_, y_max = ax[k, 2].get_ylim() # 获取y轴范围
ax[k, 2].text(-0.1, 0.05*y_max, f"g-r={diff_max_mean:.3f}", fontsize=18, color='m')
plt.show()
# for i, box in enumerate(boxes):
# x1, y1, x2, y2, tid, score, cls, fid, bid = box
# imgpath = imgpaths[int(fid-1)]
# image = cv2.imread(imgpath)
# subimg = image[int(y1/2):int(y2/2), int(x1/2):int(x2/2), :]
# camerType, timeTamp, _, frameID = os.path.basename(imgpath).split('.')[0].split('_')
# subimgName = f"cam{camerType}_{i}_tid{int(tid)}_fid({int(fid)}, {frameID})_{simi_mean[i]:.3f}.png"
# imgpairs.append((subimgName, subimg))
# spath = os.path.join(subimgpath, subimgName)
# cv2.imwrite(spath, subimg)
# oldname = f"cam{camerType}_{i}_tid{int(tid)}_fid({int(fid)}, {frameID}).png"
# oldpath = os.path.join(subimgpath, oldname)
# if os.path.exists(oldpath):
# os.remove(oldpath)
if len(mean_values)==2:
mean_diff = abs(mean_values[1]-mean_values[0])
ax[0, 1].set_title(f"mean diff: {mean_diff:.3f}")
if len(max_values)==2:
max_diff = abs(max_values[1]-max_values[0])
ax[0, 2].set_title(f"max diff: {max_diff:.3f}")
try:
fig.suptitle(f"Similar: {Similar:.3f}", fontsize=16)
except Exception as e:
print(e)
print(f"Similar: {Similar}")
pltpath = os.path.join(subimgpath, f"hist_max_{kpercent}%_.png")
plt.savefig(pltpath)
pltpath1 = os.path.join(histpath, f"{evtname}_.png")
plt.savefig(pltpath1)
plt.close()
def main():
simi_matrix()
@ -42,3 +363,14 @@ def main():
if __name__ == "__main__":
main()
# cluster()

View File

@ -61,8 +61,8 @@ class Config:
test_val = "D:/比对/cl"
# test_val = "./data/test_data_100"
# test_model = "checkpoints/best_resnet18_v11.pth"
test_model = "checkpoints/zhanting_res_801.pth"
test_model = "checkpoints/best_resnet18_v11.pth"
# test_model = "checkpoints/zhanting_res_801.pth"

View File

@ -13,25 +13,23 @@ from scipy.spatial.distance import cdist
from utils.event import ShoppingEvent
def gen_eventdict(sourcePath, stype="data"):
def init_eventdict(sourcePath, stype="data"):
'''stype: str,
'source': pickle files generated from videos or images
'data': on-site runtime data read from the data files
'''
k, errEvents = 0, []
for source_path in sourcePath:
evtpath, bname = os.path.split(source_path)
for bname in os.listdir(sourcePath):
# bname = r"20241126-135911-bdf91cf9-3e9a-426d-94e8-ddf92238e175_6923555210479"
source_path = os.path.join(evtpath, bname)
source_path = os.path.join(sourcePath, bname)
if not os.path.isdir(source_path): continue
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
if os.path.isfile(pickpath): continue
try:
event = ShoppingEvent(source_path, stype)
# save_data(event, resultPath)
with open(pickpath, 'wb') as f:
pickle.dump(event, f)
@ -44,7 +42,7 @@ def gen_eventdict(sourcePath, stype="data"):
# break
errfile = os.path.join(resultPath, 'error_events.txt')
with open(errfile, 'w', encoding='utf-8') as f:
with open(errfile, 'a', encoding='utf-8') as f:
for line in errEvents:
f.write(line + '\n')
@ -61,6 +59,44 @@ def read_eventdict(eventDataPath):
return evtDict
def simi_calc(event, o2nevt, typee=None):
if typee == "11":
boxes1 = event.front_boxes
boxes2 = o2nevt.front_boxes
feat1 = event.front_feats
feat2 = o2nevt.front_feats
if typee == "10":
boxes1 = event.front_boxes
boxes2 = o2nevt.back_boxes
feat1 = event.front_feats
feat2 = o2nevt.back_feats
if typee == "00":
boxes1 = event.back_boxes
boxes2 = o2nevt.back_boxes
feat1 = event.back_feats
feat2 = o2nevt.back_feats
if typee == "01":
boxes1 = event.back_boxes
boxes2 = o2nevt.front_boxes
feat1 = event.back_feats
feat2 = o2nevt.front_feats
if len(feat1) and len(feat2):
matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
simi = np.mean(matrix)
else:
simi = None
return simi
def one2n_pr(evtDicts):
@ -81,45 +117,22 @@ def one2n_pr(evtDicts):
o2nevt = o2n_evt[0]
else:
continue
if typee == "11":
boxes1 = event.front_trackingboxes
boxes2 = o2nevt.front_trackingboxes
feat1 = event.front_trackingfeats
feat2 = o2nevt.front_trackingfeats
if typee == "10":
boxes1 = event.front_trackingboxes
boxes2 = o2nevt.back_trackingboxes
feat1 = event.front_trackingfeats
feat2 = o2nevt.back_trackingfeats
if typee == "00":
boxes1 = event.back_trackingboxes
boxes2 = o2nevt.back_trackingboxes
feat1 = event.back_trackingfeats
feat2 = o2nevt.back_trackingfeats
if typee == "01":
boxes1 = event.back_trackingboxes
boxes2 = o2nevt.front_trackingboxes
feat1 = event.back_trackingfeats
feat2 = o2nevt.front_trackingfeats
matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
simi_mean = np.mean(matrix)
simi_max = np.max(matrix)
simival = simi_calc(event, o2nevt, typee)
if simival is None:
continue
evt_names.append(nname)
evt_barcodes.append(barcode)
evt_similars.append(simi_mean)
evt_similars.append(simival)
evt_types.append(typee)
if len(evt_names)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
and len(evt_similars)==len(evt_types) and len(evt_names)>0:
maxsim = evt_similars[evt_similars.index(max(evt_similars))]
# maxsim = evt_similars[evt_similars.index(max(evt_similars))]
maxsim = max(evt_similars)
for i in range(len(evt_names)):
bcd, simi = evt_barcodes[i], evt_similars[i]
@ -173,16 +186,16 @@ def one2n_pr(evtDicts):
plt.show()
## ============================= 1:n 直方图'''
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tpsimi, bins=60, edgecolor='black')
axes[0, 0].hist(tpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 1].hist(fpsimi, bins=60, edgecolor='black')
axes[0, 1].hist(fpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[1, 0].hist(tnsimi, bins=60, edgecolor='black')
axes[1, 0].hist(tnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
axes[1, 1].hist(fnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
plt.show()
@ -192,16 +205,16 @@ def one2n_pr(evtDicts):
def main():
'''1. 生成事件字典并保存至 eventDataPath, 只需运行一次 '''
# gen_eventdict(sourcePath)
# init_eventdict(eventSourcePath)
'''2. 读取时间字典 '''
'''2. 读取事件字典 '''
evtDicts = read_eventdict(eventDataPath)
'''3. 1:n 比对事件评估 '''
fpevents = one2n_pr(evtDicts)
fpErrFile = str(Path(resultPath).joinpath("one2n_Error.txt"))
fpErrFile = str(Path(resultPath).joinpath("one2n_fp_Error.txt"))
with open(fpErrFile, "w") as file:
for item in fpevents:
file.write(item + "\n")
@ -214,8 +227,7 @@ def main():
if __name__ == '__main__':
eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"]
eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result"
eventDataPath = os.path.join(resultPath, "evtobjs")

View File

@ -11,7 +11,7 @@ Created on Fri Aug 30 17:53:03 2024
Standard-feature extraction; results are saved to the folder stdFeaturePath.
It can also be run on the fly against the intersection with the shopping-event barcode set.
2. 1:1 comparison performance test,
func: one2one_eval(similPath)
func: one2one_simi()
(1) Intersect the barcodes of the shopping events and the standard feature set; build evtDict and stdDict
(2) Build the "scan A, put A" and "scan A, put B" combinations: mergePairs = AA_list + AB_list
(3) Loop over mergePairs and compute the similarity of each "(A, A)" or "(A, B)" pair;
@ -20,7 +20,7 @@ Created on Fri Aug 30 17:53:03 2024
3. Computation of metrics such as precision and recall
func: compute_precise_recall(pickpath)
func: compute_one2one_pr(pickpath)
@author: ym
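For reference, step (2) above can be sketched as follows. This is only a minimal illustration of how the "scan A, put A" / "scan A, put B" pairs might be assembled: the helper name build_merge_pairs and the rule of drawing one random cross barcode per event are assumptions, since the AB loop body is not shown in this hunk; only the AA_list construction and the "same"/"diff" labels come from the code in this commit.

import random

def build_merge_pairs(evtList, barcodes):
    # evtList: [(evtname, barcode), ...];  barcodes: set of all standard-library barcodes
    AA_list = [(evtname, barcode, "same") for evtname, barcode in evtList]
    AB_list = []
    for evtname, barcode in evtList:
        others = list(barcodes - {barcode})
        if others:
            # assumption: pair each event with one random non-matching barcode
            AB_list.append((evtname, random.choice(others), "diff"))
    return AA_list + AB_list   # mergePairs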
@ -33,6 +33,9 @@ import sys
import random
import pickle
import json
import random
import copy
import sys
# import torch
import time
# import json
@ -54,6 +57,7 @@ from feat_extract.config import config as conf
from feat_extract.inference import FeatsInterface
from utils.event import ShoppingEvent, save_data
from genfeats import gen_bcd_features
from event_test import calc_simil
@ -175,14 +179,53 @@ def data_precision_compare(stdfeat, evtfeat, evtMessage, save=True):
f.write(line + '\n')
def one2one_simi():
def simi_calc(event, stdfeat):
evtfeat = event.feats_compose
if isinstance(event.feats_select, list):
if len(event.feats_select) and len(event.feats_select[0]):
evtfeat = event.feats_select[0]
else:
return None, None, None
else:
evtfeat = event.feats_select
if len(evtfeat)==0 or len(stdfeat)==0:
return None, None, None
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
matrix[matrix < 0] = 0
simi_mean = np.mean(matrix)
simi_max = np.max(matrix)
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
return simi_mean, simi_max, simi_mfeat[0,0]
def build_std_evt_dict():
'''
stdFeaturePath: path of the standard feature set
eventDataPath: path of the ShoppingEvent objects
'''
stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and p.suffix=='.pickle']
# stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and p.suffix=='.json']
'''*********** USearch ***********'''
stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
stdBarcode = []
stdlib = {}
with open(stdFeaturePath, 'r', encoding='utf-8') as f:
data = json.load(f)
for dic in data['total']:
barcode = dic['key']
feature = np.array(dic['value'])
stdBarcode.append(barcode)
stdlib[barcode] = feature
'''======1. 购物事件列表,该列表中的 Barcode 存在于标准的 stdBarcode 内 ==='''
evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventDataPath).iterdir()
if p.is_file()
@ -192,16 +235,21 @@ def one2one_simi():
and p.stem.split('_')[-1] in stdBarcode
]
barcodes = set([bcd for _, bcd in evtList])
'''======2. 构建用于比对的标准特征字典 ============='''
# stdDict = {}
# for barcode in barcodes:
# stdpath = os.path.join(stdFeaturePath, barcode+'.json')
# with open(stdpath, 'r', encoding='utf-8') as f:
# stddata = json.load(f)
# feat = np.array(stddata["value"])
# stdDict[barcode] = feat
'''*********** USearch ***********'''
stdDict = {}
for barcode in barcodes:
stdpath = os.path.join(stdFeaturePath, barcode+'.pickle')
with open(stdpath, 'rb') as f:
stddata = pickle.load(f)
stdDict[barcode] = stddata
stdDict[barcode] = stdlib[barcode]
'''======3. 构建用于比对的操作事件字典 ============='''
evtDict = {}
for evtname, barcode in evtList:
@ -209,21 +257,123 @@ def one2one_simi():
with open(evtpath, 'rb') as f:
evtdata = pickle.load(f)
evtDict[evtname] = evtdata
return evtList, evtDict, stdDict
def one2SN_pr(evtList, evtDict, stdDict):
std_barcodes = set([bcd for _, bcd in evtList])
tp_events, fn_events, fp_events, tn_events = [], [], [], []
tp_simi, fn_simi, tn_simi, fp_simi = [], [], [], []
errorFile_one2SN = []
SN = 9
for evtname, barcode in evtList:
bcd_selected = [barcode]
dset = list(std_barcodes - set([barcode]))
if len(dset) > SN:
random.shuffle(dset)
bcd_selected.extend(dset[:SN])
else:
bcd_selected.extend(dset)
event = evtDict[evtname]
## 无轨迹判断
if len(event.front_feats)+len(event.back_feats)==0:
print(evtname)
continue
barcodes, similars = [], []
for stdbcd in bcd_selected:
stdfeat = stdDict[stdbcd]
# simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
simi_mean = calc_simil(event, stdfeat)
## no guard needed here: events with both front_feats and back_feats empty were skipped above
# if simi_mean==None:
# continue
barcodes.append(stdbcd)
similars.append(simi_mean)
## 此处不需要保护
# if len(similars)==0:
# print(evtname)
# continue
max_idx = similars.index(max(similars))
max_sim = similars[max_idx]
for i in range(len(barcodes)):
bcd, simi = barcodes[i], similars[i]
if bcd==barcode and simi==max_sim:
tp_simi.append(simi)
tp_events.append(evtname)
elif bcd==barcode and simi!=max_sim:
fn_simi.append(simi)
fn_events.append(evtname)
elif bcd!=barcode and simi!=max_sim:
tn_simi.append(simi)
tn_events.append(evtname)
elif bcd!=barcode and simi==max_sim and barcode in barcodes:
fp_simi.append(simi)
fp_events.append(evtname)
else:
errorFile_one2SN.append(evtname)
PPreciseX, PRecallX = [], []
NPreciseX, NRecallX = [], []
Thresh = np.linspace(-0.2, 1, 100)
for th in Thresh:
'''Applies to the (Precision, Recall) scheme: multiple similarities are computed and ranked; a sample is a TP when the barcode matches and it ranks first '''
'''===================================== 1:SN '''
TPX = sum(np.array(tp_simi) >= th)
FPX = sum(np.array(fp_simi) >= th)
FNX = sum(np.array(fn_simi) < th)
TNX = sum(np.array(tn_simi) < th)
PPreciseX.append(TPX/(TPX+FPX+1e-6))
PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
NPreciseX.append(TNX/(TNX+FNX+1e-6))
NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
fig, ax = plt.subplots()
ax.plot(Thresh, PPreciseX, 'r', label='Precise_Pos: TP/TPFP')
ax.plot(Thresh, PRecallX, 'b', label='Recall_Pos: TP/TPFN')
ax.plot(Thresh, NPreciseX, 'g', label='Precise_Neg: TN/TNFP')
ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFN')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('1:SN Precise & Recall')
ax.set_xlabel(f"Event Num: {len(evtList)}")
ax.legend()
plt.show()
## ============================= 1:N 展厅 直方图'''
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title(f'TP({len(tp_simi)})')
axes[0, 1].hist(fp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title(f'FP({len(fp_simi)})')
axes[1, 0].hist(tn_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title(f'TN({len(tn_simi)})')
axes[1, 1].hist(fn_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title(f'FN({len(fn_simi)})')
plt.show()
'''======4.2 barcode 标准图像保存 =================='''
# for stdbcd in barcodes:
# stdImgpath = stdDict[stdbcd]["imgpaths"]
# pstdpath = os.path.join(subimgPath, f"{stdbcd}")
# if not os.path.exists(pstdpath):
# os.makedirs(pstdpath)
# ii = 1
# for filepath in stdImgpath:
# stdpath = os.path.join(pstdpath, f"{stdbcd}_{ii}.png")
# shutil.copy2(filepath, stdpath)
# ii += 1
'''======5 构造 3 个事件对: 扫 A 放 A, 扫 A 放 B, 合并 ===================='''
def one2one_simi(evtList, evtDict, stdDict):
barcodes = set([bcd for _, bcd in evtList])
'''======1 构造 3 个事件对: 扫 A 放 A, 扫 A 放 B, 合并 ===================='''
AA_list = [(evtname, barcode, "same") for evtname, barcode in evtList]
AB_list = []
for evtname, barcode in evtList:
@ -234,45 +384,36 @@ def one2one_simi():
mergePairs = AA_list + AB_list
'''======6 计算事件、标准特征集相似度 =================='''
'''======2 计算事件、标准特征集相似度 =================='''
rltdata = []
for i in range(len(mergePairs)):
evtname, stdbcd, label = mergePairs[i]
event = evtDict[evtname]
##============================================ float32
stdfeat = stdDict[stdbcd]["feats_ft32"]
evtfeat = event.feats_compose
if len(evtfeat)==0: continue
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
matrix[matrix < 0] = 0
simi_mean = np.mean(matrix)
simi_max = np.max(matrix)
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat[0,0]))
if len(event.feats_compose)==0: continue
stdfeat = stdDict[stdbcd] # float32
simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
if simi_mean is None:
continue
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat))
'''================ float32、16、int8 精度比较与存储 ============='''
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], save=True)
print("func: one2one_eval(), have finished!")
return rltdata
def compute_precise_recall(rltdata):
def one2one_pr(rltdata):
Same, Cross = [], []
for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
if label == "same":
Same.append(simi_mean)
Same.append(simi_max)
if label == "diff":
Cross.append(simi_mean)
Cross.append(simi_max)
Same = np.array(Same)
Cross = np.array(Cross)
@ -280,11 +421,11 @@ def compute_precise_recall(rltdata):
TNFP = len(Cross)
# fig, axs = plt.subplots(2, 1)
# axs[0].hist(Same, bins=60, edgecolor='black')
# axs[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
# axs[0].set_xlim([-0.2, 1])
# axs[0].set_title(f'Same Barcode, Num: {TPFN}')
# axs[1].hist(Cross, bins=60, edgecolor='black')
# axs[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
# axs[1].set_xlim([-0.2, 1])
# axs[1].set_title(f'Cross Barcode, Num: {TNFP}')
# plt.savefig(f'./result/{file}_hist.png') # svg, png, pdf
@ -324,6 +465,23 @@ def compute_precise_recall(rltdata):
rltpath = os.path.join(similPath, 'pr.png')
plt.savefig(rltpath) # svg, png, pdf
fig, axes = plt.subplots(2,1)
axes[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0].set_xlim([-0.2, 1])
axes[0].set_title(f'TP({len(Same)})')
axes[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1].set_xlim([-0.2, 1])
axes[1].set_title(f'TN({len(Cross)})')
rltpath = os.path.join(similPath, 'hist.png')
plt.savefig(rltpath)
plt.show()
def gen_eventdict(sourcePath, saveimg=True):
k, errEvents = 0, []
@ -358,9 +516,7 @@ def gen_eventdict(sourcePath, saveimg=True):
f.write(line + '\n')
def test_one2one():
def init_std_evt_dict():
'''==== 0. 生成事件列表和对应的 Barcodes列表 ==========='''
bcdList, event_spath = [], []
for evtpath in eventSourcePath:
@ -383,10 +539,33 @@ def test_one2one():
print("eventList have generated and saved!")
'''==== 3. 1:1性能评估 ==============='''
rltdata = one2one_simi()
compute_precise_recall(rltdata)
def test_one2one():
'''1:1性能评估'''
# 1. 只需运行一次,生成事件字典和相应的标准特征库字典
# init_std_evt_dict()
# 2. 基于事件barcode集和标准库barcode交集构造事件集合
evtList, evtDict, stdDict = build_std_evt_dict()
rltdata = one2one_simi(evtList, evtDict, stdDict)
one2one_pr(rltdata)
def test_one2SN():
'''1:SN性能评估'''
# 1. 只需运行一次,生成事件字典和相应的标准特征库字典
# init_std_evt_dict()
# 2. 事件barcode集和标准库barcode求交集
evtList, evtDict, stdDict = build_std_evt_dict()
one2SN_pr(evtList, evtDict, stdDict)
if __name__ == '__main__':
'''
@ -402,7 +581,7 @@ if __name__ == '__main__':
stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
stdBarcodePath = r"D:\exhibition\dataset\bcdpath"
stdFeaturePath = r"D:\exhibition\dataset\feats"
stdFeaturePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\features_json\v11_barcode_11592"
# eventSourcePath = [r'D:\exhibition\images\20241202']
# eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"]
@ -419,6 +598,8 @@ if __name__ == '__main__':
os.makedirs(similPath)
test_one2one()
# test_one2SN()

View File

@ -107,9 +107,7 @@ def test_compare():
def contrast_pr(paths):
'''
1:1
'''
'''
paths = Path(paths)
evtpaths = []
@ -117,25 +115,19 @@ def contrast_pr(paths):
condt1 = p.is_dir()
condt2 = len(p.name.split('_'))>=2
condt3 = len(p.name.split('_')[-1])>8
condt4 = p.name.split('_')[-1].isdigit()
condt4 = p.name.split('_')[-1].isdigit()
if condt1 and condt2 and condt3 and condt4:
evtpaths.append(p)
# evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2 and len(p.name.split('_')[-1])>8]
# evtpaths = [p for p in paths.iterdir() if p.is_dir()]
events, similars = [], []
##===================================== 扫A放A, 扫A放B场景()
one2oneAA, one2oneAB = [], []
one2SNAA, one2SNAB = [], []
##===================================== 应用于 11
_tp_events, _fn_events, _fp_events, _tn_events = [], [], [], []
_tp_simi, _fn_simi, _tn_simi, _fp_simi = [], [], [], []
@ -406,9 +398,12 @@ def contrast_pr(paths):
axes[0].hist(np.array(one2SNAA), bins=60, edgecolor='black')
axes[0].set_xlim([-0.2, 1])
axes[0].set_title('AA')
axes[0].set_xlabel(f"Event Num: {len(one2SNAA)}")
axes[1].hist(np.array(one2SNAB), bins=60, edgecolor='black')
axes[1].set_xlim([-0.2, 1])
axes[1].set_title('BB')
axes[1].set_xlabel(f"Event Num: {len(one2SNAB)}")
plt.show()
''''3. ============================= 1:SN 曲线'''
@ -428,16 +423,16 @@ def contrast_pr(paths):
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tp_simi, bins=60, edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 0].set_title(f'TP({len(tp_simi)})')
axes[0, 1].hist(fp_simi, bins=60, edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[0, 1].set_title(f'FP({len(fp_simi)})')
axes[1, 0].hist(tn_simi, bins=60, edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 0].set_title(f'TN({len(tn_simi)})')
axes[1, 1].hist(fn_simi, bins=60, edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
axes[1, 1].set_title(f'FN({len(fn_simi)})')
plt.show()
@ -458,16 +453,16 @@ def contrast_pr(paths):
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tpsimi, bins=60, edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 0].set_title(f'TP({len(tpsimi)})')
axes[0, 1].hist(fpsimi, bins=60, edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[0, 1].set_title(f'FP({len(fpsimi)})')
axes[1, 0].hist(tnsimi, bins=60, edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 0].set_title(f'TN({len(tnsimi)})')
axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
axes[1, 1].set_title(f'FN({len(fnsimi)})')
plt.show()
@ -500,7 +495,7 @@ def contrast_pr(paths):
if __name__ == "__main__":
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
evtpaths = r"D:\全实时\source_data\2024122416"
contrast_pr(evtpaths)

117
contrast/select_subimgs.py Normal file
View File

@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 13:58:13 2024
written for selecting standard sub-images for Wuhuaqi
@author: ym
"""
import os
import time
# import torch
import pickle
# import json
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from feat_extract.config import config as conf
# from model import resnet18 as resnet18
from feat_extract.inference import FeatsInterface #, inference_image
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
def gen_features(imgpath):
Encoder = FeatsInterface(conf)
imgs, imgnames = [], []
for filename in os.listdir(imgpath):
file, ext = os.path.splitext(filename)
if ext not in IMG_FORMAT: continue
fpath = os.path.join(imgpath, filename)
img = Image.open(fpath)
imgs.append(img)
filelist = file.split("_")
newname = "_".join([filelist[0],filelist[1], filelist[2], filelist[-3], filelist[-2], filelist[-1]])
# imgnames.append(newname)
imgnames.append(file)
features = Encoder.inference(imgs)
features /= np.linalg.norm(features, axis=1)[:, None]
return features, imgnames
def top_p_percent_indices(matrix, p):
"""
Finds the indices of the top p% largest elements in a 2D matrix.
Args:
matrix (np.ndarray): A 2D NumPy array.
p: int, 0-100
Returns:
List[Tuple[int, int]]: A list of (row, column) indices for the top p% largest elements.
"""
# Flatten the matrix
flat_matrix = matrix.flatten()
# Calculate the threshold index for the top p%
num_elements = len(flat_matrix)
threshold_index = int(num_elements * 0.01 * p)  # top p% count
threshold_index = max(1, threshold_index) # Ensure at least one element is considered
threshold_value = np.partition(flat_matrix, -threshold_index)[-threshold_index]
# Create a mask for elements >= threshold
mask = matrix >= threshold_value
# Get the indices of elements that satisfy the mask
indices = np.argwhere(mask)
return list(map(tuple, indices))
def main():
imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase\6923555210479"
feats, imgnames = gen_features(imgpath)
n = len(feats)
matrix = 1 - cdist(feats, feats, 'cosine')
nmatrix = np.array([[matrix[i][j] for j in range(n) if i != j] for i in range(n)])
top_p_large_index = top_p_percent_indices(nmatrix, 1)
top_p_small_index = top_p_percent_indices(-1*nmatrix, 1)
simi_mean = np.mean(nmatrix, axis=1)
max_simi = np.max(nmatrix)
max_index = np.where(nmatrix==max_simi)
min_simi = np.min(nmatrix)
min_index = np.where(nmatrix==min_simi)
fig, ax = plt.subplots()
simils = [matrix[i][j] for j in range(n) for i in range(n) if j>i]
ax.hist(simils, bins=60, range=(-0.2, 1), edgecolor='black')
ax.set_xlim([-0.2, 1])
ax.set_title("Similarity")
print("done!")
if __name__ == '__main__':
main()

View File

@ -15,6 +15,7 @@ sys.path.append(r"D:\DetectTracking")
from tracking.utils.plotting import Annotator, colors
from tracking.utils.drawtracks import drawTrack
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
VID_FORMAT = ['.mp4', '.avi']
@ -117,10 +118,12 @@ class ShoppingEvent:
if stype=="data":
self.from_datafile(eventpath)
if stype=="realtime":
self.from_realtime_datafile(eventpath)
if stype=="source":
self.from_source_pkl(eventpath)
self.feats_select = []
self.feats_select = np.empty((0, 256), dtype=np.float64)
self.feats_compose = np.empty((0, 256), dtype=np.float64)
self.select_feats()
self.compose_feats()
@ -289,6 +292,66 @@ class ShoppingEvent:
elif CamerType == '1':
self.front_boxes = tracking_output_boxes
self.front_feats = tracking_output_feats
def from_realtime_datafile(self, eventpath):
# evtList = self.evtname.split('_')
# if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
# self.barcode = evtList[-1]
# if len(evtList)==3 and evtList[-1]== evtList[-2]:
# self.evtType = 'input'
# else:
# self.evtType = 'other'
'''================ path of video ============='''
for vidname in os.listdir(eventpath):
name, ext = os.path.splitext(vidname)
if ext not in VID_FORMAT: continue
vidpath = os.path.join(eventpath, vidname)
CamerType = name.split('_')[0]
if CamerType == '0':
self.back_videopath = vidpath
if CamerType == '1':
self.front_videopath = vidpath
'''================ process.data ============='''
procpath = Path(eventpath).joinpath('process.data')
if procpath.is_file():
SimiDict = read_similar(procpath)
self.one2one = SimiDict['one2one']
self.one2n = SimiDict['one2n']
self.one2SN = SimiDict['one2SN']
'''=========== 0/1_track.data & 0/1_tracking_output.data ======='''
for dataname in os.listdir(eventpath):
datapath = os.path.join(eventpath, dataname)
if not os.path.isfile(datapath): continue
CamerType = dataname.split('_')[0]
'''========== 0/1_track.data =========='''
if dataname.find("_track.data")>0:
trackerboxes, trackerfeats = extract_data_realtime(datapath)
if CamerType == '0':
self.back_trackerboxes = trackerboxes
self.back_trackerfeats = trackerfeats
if CamerType == '1':
self.front_trackerboxes = trackerboxes
self.front_trackerfeats = trackerfeats
'''========== 0/1_tracking_output.data =========='''
if dataname.find("_tracking_output.data")>0:
trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(datapath)
if CamerType == '0':
self.back_trackingboxes = trackingboxes
self.back_trackingfeats = trackingfeats
self.back_boxes = tracking_outboxes
self.back_feats = tracking_outfeats
elif CamerType == '1':
self.front_trackingboxes = trackingboxes
self.front_trackingfeats = trackingfeats
self.front_boxes = tracking_outboxes
self.front_feats = tracking_outfeats
@ -305,11 +368,10 @@ class ShoppingEvent:
def select_feats(self):
'''事件的特征选择'''
self.feats_select = []
if len(self.front_feats):
self.feats_select = self.front_feats
self.feats_select = self.front_feats[0]
elif len(self.back_feats):
self.feats_select = self.back_feats
self.feats_select = self.back_feats[0]
def plot_save_image(self, savepath):

View File

@ -120,9 +120,7 @@ def pipeline(
save_dir_video = save_dir_event / Path(str(Path(vpath).stem))
if not save_dir_video.exists():
save_dir_video.mkdir(parents=True, exist_ok=True)
'''Yolo + Resnet + Tracker'''
optdict["source"] = vpath
@ -212,15 +210,7 @@ def pipeline(
trajpath = os.path.join(save_dir_event, "traj.png")
cv2.imwrite(trajpath, img_cat)
'''前后摄轨迹选择'''
if stdfeat_path is not None:
with open(stdfeat_path, 'rb') as f:
featDict = pickle.load(f)
def main_loop():
bcdpath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192"
@ -264,12 +254,12 @@ def main():
'''
函数pipeline(),遍历事件文件夹,选择类型 image 或 video,
'''
evtdir = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
evtdir = r"D:\全实时\source_data\2024122416"
evtdir = Path(evtdir)
parmDict = {}
parmDict["savepath"] = r"D:\contrast\detect\pipeline"
parmDict["SourceType"] = "image" # video, image
parmDict["savepath"] = r"D:\全实时\result\pipeline"
parmDict["SourceType"] = "video" # video, image
parmDict["stdfeat_path"] = None
k = 0
@ -278,16 +268,16 @@ def main():
if item.is_dir():
# item = r"D:\exhibition\images\images2\images2"
parmDict["eventpath"] = item
pipeline(**parmDict)
# pipeline(**parmDict)
# try:
# pipeline(**parmDict)
# except Exception as e:
# errEvents.append(str(item))
try:
pipeline(**parmDict)
except Exception as e:
errEvents.append(str(item))
k+=1
if k==1:
break
# k+=1
# if k==1:
# break
errfile = os.path.join(parmDict["savepath"], f'error_events.txt')
with open(errfile, 'w', encoding='utf-8') as f:

137
realtime/full_realtime.py Normal file
View File

@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 25 09:17:32 2024
@author: ym
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from contrast.utils.event import ShoppingEvent
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import read_weight_sensor, extract_data_realtime, read_tracking_output_realtime
from tracking.utils.read_data import read_process
def time_std2stamp(custom_time):
date_part = custom_time.split('-')[0]
time_part = custom_time.split('-')[1]
ms_part = int(custom_time.split('-')[2])
standard_time = f"{date_part} {time_part[:2]}:{time_part[2:4]}:{time_part[4:]}"
dt = datetime.strptime(standard_time, "%Y%m%d %H:%M:%S")
timestamp = int(dt.timestamp() * 1000) + ms_part
return timestamp
def time_stamp2std(timestamp):
if isinstance(timestamp, float) or isinstance(timestamp, str):
timestamp = int(timestamp)
ms = timestamp%1000
times = timestamp//1000
std_time = datetime.fromtimestamp(times)
stdtime = std_time.strftime("%Y%m%d-%H%M%S") + '-' +str(ms)
return stdtime
def get_timeduring_weight(procpath):
eventStart, eventEnd, weightValue = None, None, None
if os.path.isfile(procpath):
timeDict = read_process(procpath)
if "eventStart" in timeDict.keys():
eventStart = timeDict["eventStart"]
if "eventEnd" in timeDict.keys():
eventEnd = timeDict["eventEnd"]
if "weightValue" in timeDict.keys():
weightValue = timeDict["weightValue"]
return eventStart, eventEnd, weightValue
def event_devide(wpath):
'''
Split events based on the weight time-series data in _weight.data
'''
# wpath = r'D:\全实时\source_data\2024122416\20241224-162658370_weight.data'
tpath, _ = os.path.split(wpath)
wsdata = read_weight_sensor(wpath)
times, weights = wsdata[:, 0], wsdata[:, 1]
Start, End = times[0], times[-1]
evtpaths, evtTimeWeight = [], []
for filename in os.listdir(tpath):
filelist = filename.split('_')
custom_time = filelist[0]
evtpath = os.path.join(tpath, filename)
if os.path.isdir(evtpath):
stamp = time_std2stamp(custom_time)
if stamp >= Start and stamp <= End:
evtpaths.append(evtpath)
for evtpath in evtpaths:
evtname = os.path.basename(evtpath)
event = ShoppingEvent(evtpath, stype = "realtime")
# try:
# event = ShoppingEvent(evtpath, stype = "realtime")
# except Exception as e:
# print(f"Error is: {e}", evtname)
'''读取事件的起止时间、重力变化值'''
propath = os.path.join(evtpath, "process.data")
evtStart, evtEnd, wgtValue = get_timeduring_weight(propath)
evtTimeWeight.append((evtStart, evtEnd, wgtValue))
'''重力变化曲线、事件起止区间'''
fig, ax1 = plt.subplots(figsize=(16, 9), dpi=100)
ax1.plot(times-Start, weights, 'bo-', linewidth=1, markersize=3)
ax1.set_title('Weight (gram)')
for t0, t1, w in evtTimeWeight:
min_diff = float('inf')
index = None
for i, t in enumerate(times):
diff = abs(t0 - t)
if diff < min_diff:
min_diff = diff
index = i
w0 = weights[index]
w1 = w0 + w
ax1.plot((t0 - Start, t0 - Start), (w0, w1), 'r*-', linewidth=1, markersize=6)
ax1.plot((t1 - Start, t1 - Start), (w0, w1), 'r*-', linewidth=1, markersize=6)
ax1.plot((t0 - Start, t1 - Start), (w1, w1), 'r*-', linewidth=1, markersize=6)
ax1.grid(True)
plt.show()
return plt
def main():
tpath = r"D:\全实时\source_data\2024122416"
rltpath = r"D:\全实时\result"
for filename in os.listdir(tpath):
bname = filename.split("_")[0]
if filename.find("_weight.data") <= 0:
continue
wpath = os.path.join(tpath, filename)
plt = event_devide(wpath)
plt.savefig(os.path.join(rltpath, f'{bname}.png' )) # svg, png, pdf
print(filename)
print("Done!")
if __name__ == "__main__":
main()

View File

@ -17,7 +17,7 @@ import copy
import matplotlib.pyplot as plt
from imgs_inference import run_yolo
from event_time_specify import devide_motion_state#, state_measure
from tracking.utils.read_data import read_seneor
from tracking.utils.read_data import read_weight_sensor
# IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
# VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
@ -450,10 +450,10 @@ def show_seri():
'''===============读取重力信号数据==================='''
seneorfile = os.path.join(datapath, 'sensor.txt')
WeightDict = read_seneor(seneorfile)
weights = read_weight_sensor(seneorfile)
weights = [(float(t), w) for t, w in WeightDict.items()]
weights = np.array(weights)
# weights = [(float(t), w) for t, w in WeightDict.items()]
# weights = np.array(weights)
'''===============重力、图像信息融合==================='''

View File

@ -214,15 +214,16 @@ def yolo_resnet_tracker(
# NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
if dataset.mode == "video":
frameId = dataset.frame
else:
frameId = dataset.count
# Process predictions
for i, det in enumerate(pred): # per image
im0 = im0s.copy()
s += '%gx%g ' % im.shape[2:] # print string
annotator = Annotator(im0.copy(), line_width=line_thickness, example=str(names))
s += '%gx%g ' % im.shape[2:] # print string
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
@ -235,46 +236,39 @@ def yolo_resnet_tracker(
'''
det_tracking = Boxes(det, im0.shape).cpu().numpy()
tracks = tracker.update(det_tracking, im0)
if len(tracks) == 0:
continue
if dataset.mode == "video":
frameId = dataset.frame
else:
frameId = dataset.count
tracks[:, 7] = frameId
# trackerBoxes = np.concatenate([trackerBoxes, tracks], axis=0)
'''================== 1. 存储 dets/subimgs/features Dict ============='''
imgs, features = ReIDEncoder.inference(im0, tracks)
featdict = {}
for ii, bid in enumerate(tracks[:, 8]):
featdict.update({f"{int(frameId)}_{int(bid)}": features[ii, :]}) # [f"feat_{int(bid)}"] = features[i, :]
frameDict = {"path": path,
"fid": int(frameId),
"bboxes": det,
"tboxes": tracks,
"imgs": imgs,
"feats": featdict}
yoloResnetTracker.append(frameDict)
# imgs, features = inference_image(im0, tracks)
# TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
'''================== 2. 提取手势位置 ==================='''
for *xyxy, id, conf, cls, fid, bid in reversed(tracks):
name = ('' if id==-1 else f'id:{int(id)} ') + names[int(cls)]
label = None if hide_labels else (name if hide_conf else f'{name} {conf:.2f}')
if id >=0 and cls==0:
color = colors(int(cls), True)
elif id >=0 and cls!=0:
color = colors(int(id), True)
else:
color = colors(19, True) # 19为调色板的最后一个元素
annotator.box_label(xyxy, label, color=color)
if len(tracks) > 0:
tracks[:, 7] = frameId
# trackerBoxes = np.concatenate([trackerBoxes, tracks], axis=0)
'''================== 1. 存储 dets/subimgs/features Dict ============='''
imgs, features = ReIDEncoder.inference(im0, tracks)
featdict = {}
for ii, bid in enumerate(tracks[:, 8]):
featdict.update({f"{int(frameId)}_{int(bid)}": features[ii, :]}) # [f"feat_{int(bid)}"] = features[i, :]
frameDict = {"path": path,
"fid": int(frameId),
"bboxes": det,
"tboxes": tracks,
"imgs": imgs,
"feats": featdict}
yoloResnetTracker.append(frameDict)
# imgs, features = inference_image(im0, tracks)
# TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
'''================== 2. 提取手势位置 ==================='''
for *xyxy, id, conf, cls, fid, bid in reversed(tracks):
name = ('' if id==-1 else f'id:{int(id)} ') + names[int(cls)]
label = None if hide_labels else (name if hide_conf else f'{name} {conf:.2f}')
if id >=0 and cls==0:
color = colors(int(cls), True)
elif id >=0 and cls!=0:
color = colors(int(id), True)
else:
color = colors(19, True) # 19为调色板的最后一个元素
annotator.box_label(xyxy, label, color=color)
'''====== Save results (image and video) ======'''
# save_path = str(save_dir / Path(path).name) # 带有后缀名
@ -719,11 +713,13 @@ def main():
# p = r"D:\datasets\ym\永辉双摄视频\新建文件夹"
# p = r"\\192.168.1.28\share\测试_202406\0723\0723_2\20240723-112522_"
# p = r"D:\datasets\ym\联华中环"
p = r"D:\exhibition\images\153112511_0_seek_105.mp4"
# p = r"D:\exhibition\images\153112511_0_seek_105.mp4"
# p = r"D:\exhibition\images\image"
optdict["project"] = r"D:\exhibition\result"
p = r"\\192.168.1.28\share\数据\原始数据\小物品数据\视频\82654976401_20241213-143457_front_addGood_5478c9a53bbe_40_17700000001.mp4"
optdict["project"] = r"D:\小物品入侵检测\result"
# optdict["project"] = r"D:\exhibition\result"
if os.path.isdir(p):
files = find_video_imgs(p)
k = 0

View File

@ -33,6 +33,22 @@ def find_samebox_in_array(arr, target):
return i
return -1
def array2list(boxes, feats):
'''boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]'''
trackID = np.unique(boxes[:, 4].astype(int))
track_ids = boxes[:, 4].astype(int)
lboxes = []
for t_id in trackID:
idx = np.where(track_ids == t_id)[0]
box = boxes[idx, :]
feat = feats[idx, :]
assert len(set(box[:, 7])) == len(box), "Please check!!!"
lboxes.append(box)
return lboxes
def extract_data(datapath):
'''
@ -174,6 +190,79 @@ def extract_data(datapath):
# return bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, trackingfeats
return bboxes, ffeats, trackerboxes, tracker_feats, trackingboxes, trackingfeats
def extract_data_realtime(datapath):
boxes, feats = [], []
tracker_feats = {}
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
line = line.strip() # 去除行尾的换行符和可能的空白字符
if not line: # 跳过空行
continue
if line.endswith(','):
line = line[:-1]
ftlist = [float(x) for x in line.split()]
if len(ftlist) != 265: continue
boxes.append(ftlist[:9])
feats.append(ftlist[9:])
trackerboxes = np.array(boxes)
trackerfeats = np.array(feats)
if len(trackerboxes)==0 or len(trackerboxes) != len(trackerfeats):
return np.array([]), {}
frmIDs = np.sort(np.unique(trackerboxes[:, 7].astype(int)))
for fid in frmIDs:
idx = np.where(trackerboxes[:, 7] == fid)[0]
box = trackerboxes[idx, :]
feat = trackerfeats[idx, :]
for i in range(len(box)):
f, b = int(box[i, 7]), int(box[i, 8])
tracker_feats.update({f"{f}_{b}": feat[i, :]})
return trackerboxes, tracker_feats
def read_tracking_output_realtime(datapath):
trackingboxes, trackingfeats = [], []
tracking_outboxes, tracking_outfeats = [], []
with open(datapath, 'r', encoding='utf-8') as lines:
boxes, feats = [], []
Flag = False
for line in lines:
line = line.strip() # 去除行尾的换行符和可能的空白字符
if not line: # 跳过空行
continue
if line.endswith(','):
line = line[:-1]
ftlist = [float(x) for x in line.split()]
if len(ftlist) != 265: continue
Flag = all(elem == 0 for elem in ftlist)
if Flag:
trackingboxes.append(np.array(boxes))
trackingfeats.append(np.array(feats))
boxes, feats = [], []
continue
boxes.append(ftlist[:9])
feats.append(ftlist[9:])
if len(boxes):
trackingboxes.append(np.array(boxes))
trackingfeats.append(np.array(feats))
if len(trackingboxes):
tracking_outboxes = trackingboxes[:1]
tracking_outfeats = trackingfeats[:1]
return trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats
def read_tracking_output(filepath):
'''
0/1_tracking_output.data 数据读取
@ -182,7 +271,7 @@ def read_tracking_output(filepath):
boxes = []
feats = []
if not os.path.isfile(filepath):
return np.array(boxes), np.array(feats)
return boxes, feats
with open(filepath, 'r', encoding='utf-8') as file:
for line in file:
@ -201,10 +290,173 @@ def read_tracking_output(filepath):
feats.append(data)
if len(feats) != len(boxes):
return [np.array([])], [np.array([])]
if len(feats) != len(boxes) or len(boxes)==0:
return [], []
return [np.array(boxes)], [np.array(feats)]
def read_process(filePath):
timeDict = {}
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for i, line in enumerate(clines):
line = line.strip()
if line.endswith(','):
line = line[:-1]
if not line: continue
lnList = line.split(":")
if line.find("eventStart")>=0:
timeDict["eventStart"] = int(lnList[-1])
if line.find("eventEnd")>=0:
timeDict["eventEnd"] = int(lnList[-1])
if line.find("weightValue")>=0:
timeDict["weightValue"] = int(lnList[-1])
return timeDict
def read_similar(filePath):
SimiDict = {}
SimiDict['one2one'] = []
SimiDict['one2SN'] = []
SimiDict['one2n'] = []
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
one2one_list, one2SN_list, one2n_list = [], [], []
Flag_1to1, Flag_1toSN, Flag_1ton = False, False, False
for i, line in enumerate(clean_lines):
line = line.strip()
if line.endswith(','):
line = line[:-1]
Dict = {}
if not line:
if len(one2one_list): SimiDict['one2one'] = one2one_list
if len(one2SN_list): SimiDict['one2SN'] = one2SN_list
if len(one2n_list): SimiDict['one2n'] = one2n_list
one2one_list, one2SN_list, one2n_list = [], [], []
Flag_1to1, Flag_1toSN, Flag_1ton = False, False, False
continue
if line.find('oneToOne')>=0:
Flag_1to1, Flag_1toSN, Flag_1ton = True, False,False
continue
if line.find('oneToSN')>=0:
Flag_1to1, Flag_1toSN, Flag_1ton = False, True, False
continue
if line.find('oneTon')>=0:
Flag_1to1, Flag_1toSN, Flag_1ton = False, False, True
continue
if Flag_1to1:
barcode = line.split(',')[0].strip()
value = line.split(',')[1].split(':')[1].strip()
Dict['barcode'] = barcode
Dict['similar'] = float(value)
one2one_list.append(Dict)
continue
if Flag_1toSN:
barcode = line.split(',')[0].strip()
value = line.split(',')[1].split(':')[1].strip()
Dict['barcode'] = barcode
Dict['similar'] = float(value)
one2SN_list.append(Dict)
continue
if Flag_1ton:
label = line.split(':')[0].strip()
value = line.split(':')[1].strip()
bcd = label.split('_')[-1]
if len(bcd)<8: continue
Dict['event'] = label
Dict['barcode'] = bcd
Dict['similar'] = float(value.split(',')[0])
Dict['type'] = value.split('=')[-1]
one2n_list.append(Dict)
if len(one2one_list): SimiDict['one2one'] = one2one_list
if len(one2n_list): SimiDict['one2n'] = one2n_list
if len(one2SN_list): SimiDict['one2SN'] = one2SN_list
return SimiDict
def read_weight_sensor(filepath):
WeightDict = OrderedDict()
with open(filepath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for i, line in enumerate(clean_lines):
line = line.strip()
if line.find(':') < 0: continue
if line.find("Weight") >= 0:
label = "Weight"
continue
keyword = line.split(':')[0]
value = line.split(':')[1]
if label == "Weight":
vdata = [float(s) for s in value.split(',') if len(s)]
WeightDict[keyword] = vdata[-1]
weights = [(float(t), w) for t, w in WeightDict.items()]
weights = np.array(weights).astype(np.int64)
return weights
def read_weight_timeConsuming(filePth):
WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()
with open(filePth, 'r', encoding='utf-8') as f:
lines = f.readlines()
# label = ''
for i, line in enumerate(lines):
line = line.strip()
if line.find(':') < 0: continue
if line.find("Weight") >= 0:
label = "Weight"
continue
if line.find("Sensor") >= 0:
label = "Sensor"
continue
if line.find("processTime") >= 0:
label = "ProcessTime"
continue
keyword = line.split(':')[0]
value = line.split(':')[1]
if label == "Weight":
WeightDict[keyword] = float(value.strip(','))
if label == "Sensor":
SensorDict[keyword] = [float(s) for s in value.split(',') if len(s)]
if label == "ProcessTime":
ProcessTimeDict[keyword] = float(value.strip(','))
# print("Done!")
return WeightDict, SensorDict, ProcessTimeDict
def read_deletedBarcode_file(filePath):
@ -320,132 +572,7 @@ def read_returnGoods_file(filePath):
def read_seneor(filepath):
WeightDict = OrderedDict()
with open(filepath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for i, line in enumerate(clean_lines):
line = line.strip()
keyword = line.split(':')[0]
value = line.split(':')[1]
vdata = [float(s) for s in value.split(',') if len(s)]
WeightDict[keyword] = vdata[-1]
return WeightDict
def read_similar(filePath):
SimiDict = {}
SimiDict['one2one'] = []
SimiDict['one2SN'] = []
SimiDict['one2n'] = []
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
one2one_list, one2SN_list, one2n_list = [], [], []
Flag_1to1, Flag_1toSN, Flag_1ton = False, False, False
for i, line in enumerate(clean_lines):
line = line.strip()
if line.endswith(','):
line = line[:-1]
Dict = {}
if not line:
if len(one2one_list): SimiDict['one2one'] = one2one_list
if len(one2SN_list): SimiDict['one2SN'] = one2SN_list
if len(one2n_list): SimiDict['one2n'] = one2n_list
one2one_list, one2SN_list, one2n_list = [], [], []
Flag_1to1, Flag_1toSN, Flag_1ton = False, False, False
continue
if line.find('oneToOne')>=0:
Flag_1to1, Flag_1toSN, Flag_1ton = True, False,False
continue
if line.find('oneToSN')>=0:
Flag_1to1, Flag_1toSN, Flag_1ton = False, True, False
continue
if line.find('oneTon')>=0:
Flag_1to1, Flag_1toSN, Flag_1ton = False, False, True
continue
if Flag_1to1:
barcode = line.split(',')[0].strip()
value = line.split(',')[1].split(':')[1].strip()
Dict['barcode'] = barcode
Dict['similar'] = float(value)
one2one_list.append(Dict)
continue
if Flag_1toSN:
barcode = line.split(',')[0].strip()
value = line.split(',')[1].split(':')[1].strip()
Dict['barcode'] = barcode
Dict['similar'] = float(value)
one2SN_list.append(Dict)
continue
if Flag_1ton:
label = line.split(':')[0].strip()
value = line.split(':')[1].strip()
bcd = label.split('_')[-1]
if len(bcd)<8: continue
Dict['event'] = label
Dict['barcode'] = bcd
Dict['similar'] = float(value.split(',')[0])
Dict['type'] = value.split('=')[-1]
one2n_list.append(Dict)
if len(one2one_list): SimiDict['one2one'] = one2one_list
if len(one2n_list): SimiDict['one2n'] = one2n_list
if len(one2SN_list): SimiDict['one2SN'] = one2SN_list
return SimiDict
def read_weight_timeConsuming(filePth):
WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()
with open(filePth, 'r', encoding='utf-8') as f:
lines = f.readlines()
# label = ''
for i, line in enumerate(lines):
line = line.strip()
if line.find(':') < 0: continue
if line.find("Weight") >= 0:
label = "Weight"
continue
if line.find("Sensor") >= 0:
label = "Sensor"
continue
if line.find("processTime") >= 0:
label = "ProcessTime"
continue
keyword = line.split(':')[0]
value = line.split(':')[1]
if label == "Weight":
WeightDict[keyword] = float(value.strip(','))
if label == "Sensor":
SensorDict[keyword] = [float(s) for s in value.split(',') if len(s)]
if label == "ProcessTime":
ProcessTimeDict[keyword] = float(value.strip(','))
# print("Done!")
return WeightDict, SensorDict, ProcessTimeDict
def plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict):

View File

@ -80,12 +80,12 @@ def videosave(bboxes, videopath="100_1688009697927.mp4"):
cap.release()
def main():
videopath = r'D:\datasets\ym'
savepath = r'D:\datasets\ym'
videopath = r'D:\videos'
savepath = r'D:\videos'
# video2imgs(videopath, savepath)
k = 0
for filename in os.listdir(videopath):
filename = "20240929-155533.ts"
# filename = "20240929-155533.ts"
file, ext = os.path.splitext(filename)
if ext not in VideoFormat:
@ -100,9 +100,9 @@ def main():
videof = os.path.join(videopath, filename)
video2imgs(videof, imgdir)
k += 1
if k == 1:
break
# k += 1
# if k == 1:
# break

View File

@ -27,6 +27,8 @@
4. Full-pipeline simulation
pipeline.py
SourceType: "image" or "video", the input data type of the yolo+resnet+tracker module
Results are saved as pickle files, which can later be used to build ShoppingEvent objects.
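A minimal driving loop for the simulation, mirroring main() in pipeline.py as changed in this commit; the import line and the concrete paths are placeholders/assumptions:

from pathlib import Path
from pipeline import pipeline   # assumption: pipeline.py is importable from the working directory

parmDict = {
    "savepath": r"D:\path\to\result\pipeline",   # placeholder output directory
    "SourceType": "video",                        # "video" or "image"
    "stdfeat_path": None,                         # optional standard-feature pickle
}
evtdir = Path(r"D:\path\to\event_folders")        # placeholder event source directory
for item in evtdir.iterdir():
    if item.is_dir():
        parmDict["eventpath"] = item
        pipeline(**parmDict)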
@ -181,63 +183,79 @@
test_one2one()
(1) Generate the standard feature set; only needs to run once
genfeatures()
(2) Generate the event dictionary; only needs to run once
gen_eventdict(eventDatePath, saveimg)
Parameters:
eventDatePath: list of event sets, each element being a collection of events;
saveimg: whether to save the event sub-images
1:1 comparison
(1) Initialize the events and the standard feature set; only needs to run once
init_std_evt_dict()
(2) Intersect the event barcode set with the standard-library barcode set
build_std_evt_dict()
(3) 1:1 performance evaluation
one2one_simi()
(4) Compute the PR curves
one2one_pr()
(the combined call sequence for 1:1 and 1:SN is sketched after the 1:SN steps below)
test_one2SN()
1:SN comparison
(1) Initialize the events and the standard feature set; only needs to run once
init_std_evt_dict()
(2) Intersect the event barcode set with the standard-library barcode set
build_std_evt_dict()
(3) 1:SN performance evaluation
one2SN_pr()
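The two drivers above reduce to the following call sequences, taken from the updated test_one2one()/test_one2SN() in this commit; step (1) is left commented out because it only needs to run once:

def test_one2one():
    # init_std_evt_dict()                                 # (1) run once
    evtList, evtDict, stdDict = build_std_evt_dict()      # (2) barcode intersection
    rltdata = one2one_simi(evtList, evtDict, stdDict)     # (3) per-pair similarities
    one2one_pr(rltdata)                                   # (4) PR curves

def test_one2SN():
    # init_std_evt_dict()                                 # (1) run once
    evtList, evtDict, stdDict = build_std_evt_dict()      # (2) barcode intersection
    one2SN_pr(evtList, evtDict, stdDict)                  # (3) 1:SN evaluation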
creat_shopping_event(eventPath, subimgPath=False)
Builds the dictionary for one shopping event, with 12 keys in total.
init_std_evt_dict()
Generates and stores the standard feature set and the event objects, together with related data; only needs to run once
save_event_subimg(event, savepath)
Saves the sub-images of one shopping event
(1) Run feature inference on the barcode images and save the results
gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
(2) Build ShoppingEvent objects from the data files or from the pickle files produced by pipeline
gen_eventdict(eventDatePath, saveimg)
build_std_evt_dict()
Builds the event set used for performance evaluation from the intersection of the event barcodes and the standard-library barcodes.
simi_calc()
Computes the similarity between an event and the standard feature set (see the sketch below)
data_precision_compare()
Performance comparison under different numeric precisions
one2one_eval()
compute_precise_recall()
int8_to_ft16()
ft16_to_uint8()
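The core of simi_calc() in the 1:1 script is a cosine-similarity matrix between the event features and the standard features. The condensed sketch below keeps only that core and omits the feats_select handling, so the exact signature is a simplification:

import numpy as np
from scipy.spatial.distance import cdist

def simi_calc_core(evtfeat, stdfeat):
    # evtfeat: (n, 256) event features;  stdfeat: (m, 256) standard features
    if len(evtfeat) == 0 or len(stdfeat) == 0:
        return None, None, None
    matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')        # pairwise cosine similarity
    matrix[matrix < 0] = 0
    simi_mean = np.mean(matrix)
    simi_max = np.max(matrix)
    stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)    # mean-feature similarity
    evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
    simi_mfeat = 1 - np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
    return simi_mean, simi_max, simi_mfeat[0, 0]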
one2n_contrast.py
Running 1:n takes 3 steps in total
'''1. Build the event dictionary and save it to eventDataPath; only needs to run once '''
# gen_eventdict(sourcePath)
'''2. Read the time dictionary '''
evtDicts = read_eventdict(eventDataPath)
'''3. 1:n comparison evaluation of the events '''
Running 1:n takes 3 steps, each corresponding to one function
(1) Build the event dictionary and save it to eventDataPath; only needs to run once
(2) Read the event dictionary
(3) 1:n comparison evaluation of the events
(1) gen_eventdict(sourcePath)
(2) read_eventdict()
inputs:
eventDataPath: storage path of the ShoppingEvent objects, which are generated by gen_eventdict().
output:
evtDicts
(3) one2n_pr()
inputs:
evtDicts
output:
fpevents: paths of the FP events
(4) simi_calc()
Computes the similarity between two events (a driver sketch follows below)
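As referenced above, a condensed driver for the 1:n evaluation, following main() in one2n_contrast.py as updated in this commit (gen_eventdict is renamed init_eventdict there); eventSourcePath, eventDataPath and resultPath are the module-level paths set in that script:

from pathlib import Path

def run_one2n():
    # (1) build the event dictionary once (renamed from gen_eventdict in this commit)
    # init_eventdict(eventSourcePath, stype="data")
    # (2) load the pickled ShoppingEvent objects
    evtDicts = read_eventdict(eventDataPath)
    # (3) 1:n evaluation; returns the FP event names
    fpevents = one2n_pr(evtDicts)
    fpErrFile = str(Path(resultPath).joinpath("one2n_fp_Error.txt"))
    with open(fpErrFile, "w") as f:
        for item in fpevents:
            f.write(item + "\n")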
one2n_contrast_old.py (disused)
test_one2n()
@ -290,13 +308,6 @@
Obtains the correct or matched event pairs in the 1:n case (take-out event, put-in event, mismatched event)
Matched-event analysis, implemented in save_tracking_imgpairs()
time_devide.py
runyolo()