last update in 2024

王庆刚
2024-12-31 16:45:04 +08:00
parent dac3b3f2b6
commit 7e13e0f5b4
20 changed files with 1349 additions and 389 deletions

Binary file not shown.

View File

@ -6,10 +6,39 @@ Created on Mon Dec 16 18:56:18 2024
"""
import os
import cv2
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from scipy.spatial.distance import cdist
from utils.event import ShoppingEvent, save_data
def main():
rcParams['font.sans-serif'] = ['SimHei'] # use SimHei so Chinese labels render correctly
rcParams['axes.unicode_minus'] = False # render minus signs correctly
'''*********** USearch ***********'''
def read_usearch():
stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
stdBarcode = []
stdlib = {}
with open(stdFeaturePath, 'r', encoding='utf-8') as f:
data = json.load(f)
for dic in data['total']:
barcode = dic['key']
feature = np.array(dic['value'])
stdBarcode.append(barcode)
stdlib[barcode] = feature
return stdlib
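# Note added for clarity: the JSON layout that read_usearch() appears to expect, inferred from the
# parsing loop above; the barcode and feature numbers below are illustrative only:
#   {
#     "total": [
#       {"key": "6923555210479", "value": [[0.01, -0.03, ...], [0.02, 0.05, ...]]},
#       ...
#     ]
#   }
# Each "value" is a 2-D list (one 256-d feature vector per standard image), so
# stdlib[barcode] becomes an (N, 256) np.ndarray after np.array(dic['value']).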
def get_eventlist():
'''
Read the error events from one test run
'''
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
text1 = "one2n_Error.txt"
text2 = "one2SN_Error.txt"
@ -24,9 +53,16 @@ def main():
if line:
fpath=os.path.join(evtpaths, line)
events.append(fpath)
events = list(set(events))
return events
def single_event():
events = get_eventlist()
'''Define the storage path for the current events and create the corresponding folders'''
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
@ -35,6 +71,291 @@ def main():
save_data(event, resultPath)
print(event.evtname)
def get_topk_percent(data, k):
"""
获取数据中最大的 k% 的元素
"""
# 将数据转换为 NumPy 数组
if isinstance(data, list):
data = np.array(data)
percentile = np.percentile(data, 100-k)
top_k_percent = data[data >= percentile]
return top_k_percent
def cluster(data, thresh=0.15):
# data = np.array([0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58])
# data = np.array([0.1, 0.13, 0.2, 0.3])
# data = np.array([0.1])
if isinstance(data, list):
data = np.array(data)
data1 = np.sort(data)
cluter, Cluters = [data1[0]], []
for i in range(1, len(data1)):
if data1[i] - data1[i-1]< thresh:
cluter.append(data1[i])
else:
Cluters.append(cluter)
cluter = [data1[i]]
Cluters.append(cluter)
clt_center = []
for clt in Cluters:
## Should a minimum number of track samples per cluster be enforced here? That constraint belongs in the track-analysis stage instead
# if len(clt)>=3:
# clt_center.append(np.mean(clt))
clt_center.append(np.mean(clt))
# print(clt_center)
return clt_center
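# Usage sketch (illustration only, not part of the committed code):
#   demo = [0.1, 0.13, 0.7, 0.2, 0.8, 0.52, 0.3, 0.7, 0.85, 0.58]
#   get_topk_percent(demo, 25)      # values at or above the 75th percentile, i.e. 0.7, 0.8, 0.7, 0.85
#   cluster(demo, thresh=0.15)      # -> roughly [0.18, 0.69]
# cluster() is a 1-D gap clustering: sort the values, start a new group whenever the gap
# between consecutive values reaches thresh, then return the mean of each group.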
def calc_simil(event, stdfeat):
def calsiml(feat1, feat2):
'''Selection strategy for the similarity between track samples and the standard feature set'''
matrix = 1 - cdist(feat1, feat2, 'cosine')
simi_max = []
for i in range(len(matrix)):
sim = np.mean(get_topk_percent(matrix[i, :], 75))
simi_max.append(sim)
cltc_max = cluster(simi_max)
Simi = max(cltc_max)
## an empty cltc_max would indicate a programming oversight and should be investigated and fixed
# if len(cltc_max):
# Simi = max(cltc_max)
# else:
# Simi = 0 # should never reach this branch
return Simi
front_boxes = np.empty((0, 9), dtype=np.float64) ## compatible with the doTracks class
front_feats = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
for i in range(len(event.front_boxes)):
front_boxes = np.concatenate((front_boxes, event.front_boxes[i]), axis=0)
front_feats = np.concatenate((front_feats, event.front_feats[i]), axis=0)
back_boxes = np.empty((0, 9), dtype=np.float64) ## compatible with the doTracks class
back_feats = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
for i in range(len(event.back_boxes)):
back_boxes = np.concatenate((back_boxes, event.back_boxes[i]), axis=0)
back_feats = np.concatenate((back_feats, event.back_feats[i]), axis=0)
if len(front_feats):
front_simi = calsiml(front_feats, stdfeat)
if len(back_feats):
back_simi = calsiml(back_feats, stdfeat)
'''Fusion strategy for the front- and back-camera similarities'''
if len(front_feats) and len(back_feats):
diff_simi = abs(front_simi - back_simi)
if diff_simi>0.15:
Similar = max([front_simi, back_simi])
else:
Similar = (front_simi+back_simi)/2
elif len(front_feats) and len(back_feats)==0:
Similar = front_simi
elif len(front_feats)==0 and len(back_feats):
Similar = back_simi
else:
Similar = None # only when event.front_feats and event.back_feats are both empty
return Similar
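# Worked illustration of the fusion rule above (numbers are made up):
#   front_simi = 0.82, back_simi = 0.55 -> |diff| = 0.27 > 0.15 -> Similar = max = 0.82
#   front_simi = 0.62, back_simi = 0.58 -> |diff| = 0.04 <= 0.15 -> Similar = mean = 0.60
# The apparent intent: trust the stronger camera when the two views disagree strongly,
# and average them when they roughly agree.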
def simi_matrix():
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\single_event"
stdlib = read_usearch()
events = get_eventlist()
for evtpath in events:
evtname = os.path.basename(evtpath)
_, barcode = evtname.split("_")
# build the event and its corresponding standard feature set
event = ShoppingEvent(evtpath)
stdfeat = stdlib[barcode]
Similar = calc_simil(event, stdfeat)
# build the storage path for the box sub-images
subimgpath = os.path.join(resultPath, f"{event.evtname}", "subimg")
if not os.path.exists(subimgpath):
os.makedirs(subimgpath)
histpath = os.path.join(resultPath, "simi_hist")
if not os.path.exists(histpath):
os.makedirs(histpath)
mean_values, max_values = [], []
cameras = ('front', 'back')
fig, ax = plt.subplots(2, 3, figsize=(16, 9), dpi=100)
kpercent = 25
for camera in cameras:
boxes = np.empty((0, 9), dtype=np.float64) ## compatible with the doTracks class
evtfeat = np.empty((0, 256), dtype=np.float64) ## compatible with the doTracks class
if camera == 'front':
for i in range(len(event.front_boxes)):
boxes = np.concatenate((boxes, event.front_boxes[i]), axis=0)
evtfeat = np.concatenate((evtfeat, event.front_feats[i]), axis=0)
imgpaths = event.front_imgpaths
else:
for i in range(len(event.back_boxes)):
boxes = np.concatenate((boxes, event.back_boxes[i]), axis=0)
evtfeat = np.concatenate((evtfeat, event.back_feats[i]), axis=0)
imgpaths = event.back_imgpaths
assert len(boxes)==len(evtfeat), f"Please check the Event: {evtname}"
if len(boxes)==0: continue
print(evtname)
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
simi_1d = matrix.flatten()
simi_mean = np.mean(matrix, axis=1)
# simi_max = np.max(matrix, axis=1)
'''Average the largest k% of similarities in each row of the similarity matrix'''
simi_max = []
for i in range(len(matrix)):
sim = np.mean(get_topk_percent(matrix[i, :], kpercent))
simi_max.append(sim)
mean_values.append(np.mean(matrix))
max_values.append(np.mean(simi_max))
diff_max_mean = np.mean(simi_max) - np.mean(matrix)
'''Plots of the similarity statistics'''
k = 0
if camera == 'front': k = 1
'''********************* all similarity values *********************'''
ax[k, 0].hist(simi_1d, bins=60, range=(-0.2, 1), edgecolor='black')
ax[k, 0].set_xlim([-0.2, 1])
ax[k, 0].set_title(camera)
_, y_max = ax[k, 0].get_ylim() # get the y-axis range
'''range of the similarity values'''
ax[k, 0].text(-0.1, 0.15*y_max, f"rng:{max(simi_1d)-min(simi_1d):.3f}", fontsize=18, color='b')
'''********************* mean ********************************'''
ax[k, 1].hist(simi_mean, bins=24, range=(-0.2, 1), edgecolor='black')
ax[k, 1].set_xlim([-0.2, 1])
ax[k, 1].set_title("mean")
_, y_max = ax[k, 1].get_ylim() # get the y-axis range
'''range of the similarity values'''
ax[k, 1].text(-0.1, 0.15*y_max, f"rng:{max(simi_mean)-min(simi_mean):.3f}", fontsize=18, color='b')
'''********************* max ******************************'''
ax[k, 2].hist(simi_max, bins=24, range=(-0.2, 1), edgecolor='black')
ax[k, 2].set_xlim([-0.2, 1])
ax[k, 2].set_title("max")
_, y_max = ax[k, 2].get_ylim() # get the y-axis range
'''range of the similarity values'''
ax[k, 2].text(-0.1, 0.15*y_max, f"rng:{max(simi_max)-min(simi_max):.3f}", fontsize=18, color='b')
'''draw the cluster centers'''
cltc_mean = cluster(simi_mean)
for value in cltc_mean:
ax[k, 1].axvline(x=value, color='m', linestyle='--', linewidth=3)
cltc_max = cluster(simi_max)
for value in cltc_max:
ax[k, 2].axvline(x=value, color='m', linestyle='--', linewidth=3)
'''draw the overall mean similarity and the mean of the per-row max similarities'''
ax[k, 1].axvline(x=np.mean(matrix), color='r', linestyle='-', linewidth=3)
ax[k, 2].axvline(x=np.mean(simi_max), color='g', linestyle='-', linewidth=3)
'''annotate (mean of per-row max similarities) - (overall mean)'''
_, y_max = ax[k, 2].get_ylim() # get the y-axis range
ax[k, 2].text(-0.1, 0.05*y_max, f"g-r={diff_max_mean:.3f}", fontsize=18, color='m')
plt.show()
# for i, box in enumerate(boxes):
# x1, y1, x2, y2, tid, score, cls, fid, bid = box
# imgpath = imgpaths[int(fid-1)]
# image = cv2.imread(imgpath)
# subimg = image[int(y1/2):int(y2/2), int(x1/2):int(x2/2), :]
# camerType, timeTamp, _, frameID = os.path.basename(imgpath).split('.')[0].split('_')
# subimgName = f"cam{camerType}_{i}_tid{int(tid)}_fid({int(fid)}, {frameID})_{simi_mean[i]:.3f}.png"
# imgpairs.append((subimgName, subimg))
# spath = os.path.join(subimgpath, subimgName)
# cv2.imwrite(spath, subimg)
# oldname = f"cam{camerType}_{i}_tid{int(tid)}_fid({int(fid)}, {frameID}).png"
# oldpath = os.path.join(subimgpath, oldname)
# if os.path.exists(oldpath):
# os.remove(oldpath)
if len(mean_values)==2:
mean_diff = abs(mean_values[1]-mean_values[0])
ax[0, 1].set_title(f"mean diff: {mean_diff:.3f}")
if len(max_values)==2:
max_diff = abs(max_values[1]-max_values[0])
ax[0, 2].set_title(f"max diff: {max_diff:.3f}")
try:
fig.suptitle(f"Similar: {Similar:.3f}", fontsize=16)
except Exception as e:
print(e)
print(f"Similar: {Similar}")
pltpath = os.path.join(subimgpath, f"hist_max_{kpercent}%_.png")
plt.savefig(pltpath)
pltpath1 = os.path.join(histpath, f"{evtname}_.png")
plt.savefig(pltpath1)
plt.close()
def main():
simi_matrix()
@ -42,3 +363,14 @@ def main():
if __name__ == "__main__":
main()
# cluster()

View File

@ -61,8 +61,8 @@ class Config:
test_val = "D:/比对/cl"
# test_val = "./data/test_data_100"
# test_model = "checkpoints/best_resnet18_v11.pth"
test_model = "checkpoints/zhanting_res_801.pth"
test_model = "checkpoints/best_resnet18_v11.pth"
# test_model = "checkpoints/zhanting_res_801.pth"

View File

@ -13,25 +13,23 @@ from scipy.spatial.distance import cdist
from utils.event import ShoppingEvent
def gen_eventdict(sourcePath, stype="data"):
def init_eventdict(sourcePath, stype="data"):
'''stype: str,
'source': pickle files generated from videos or images
'data': on-site runtime data read from the data files
'''
k, errEvents = 0, []
for source_path in sourcePath:
evtpath, bname = os.path.split(source_path)
for bname in os.listdir(sourcePath):
# bname = r"20241126-135911-bdf91cf9-3e9a-426d-94e8-ddf92238e175_6923555210479"
source_path = os.path.join(evtpath, bname)
source_path = os.path.join(sourcePath, bname)
if not os.path.isdir(source_path): continue
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
if os.path.isfile(pickpath): continue
try:
event = ShoppingEvent(source_path, stype)
# save_data(event, resultPath)
with open(pickpath, 'wb') as f:
pickle.dump(event, f)
@ -44,7 +42,7 @@ def gen_eventdict(sourcePath, stype="data"):
# break
errfile = os.path.join(resultPath, 'error_events.txt')
with open(errfile, 'w', encoding='utf-8') as f:
with open(errfile, 'a', encoding='utf-8') as f:
for line in errEvents:
f.write(line + '\n')
@ -61,6 +59,44 @@ def read_eventdict(eventDataPath):
return evtDict
def simi_calc(event, o2nevt, typee=None):
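# typee appears to encode which camera is used on each side: the first character is for
# `event`, the second for `o2nevt`, with '1' = front camera and '0' = back camera
# (e.g. "10" compares the event's front features against o2nevt's back features).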
if typee == "11":
boxes1 = event.front_boxes
boxes2 = o2nevt.front_boxes
feat1 = event.front_feats
feat2 = o2nevt.front_feats
if typee == "10":
boxes1 = event.front_boxes
boxes2 = o2nevt.back_boxes
feat1 = event.front_feats
feat2 = o2nevt.back_feats
if typee == "00":
boxes1 = event.back_boxes
boxes2 = o2nevt.back_boxes
feat1 = event.back_feats
feat2 = o2nevt.back_feats
if typee == "01":
boxes1 = event.back_boxes
boxes2 = o2nevt.front_boxes
feat1 = event.back_feats
feat2 = o2nevt.front_feats
if len(feat1) and len(feat2):
matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
simi = np.mean(matrix)
else:
simi = None
return simi
def one2n_pr(evtDicts):
@ -81,45 +117,22 @@ def one2n_pr(evtDicts):
o2nevt = o2n_evt[0]
else:
continue
if typee == "11":
boxes1 = event.front_trackingboxes
boxes2 = o2nevt.front_trackingboxes
feat1 = event.front_trackingfeats
feat2 = o2nevt.front_trackingfeats
if typee == "10":
boxes1 = event.front_trackingboxes
boxes2 = o2nevt.back_trackingboxes
feat1 = event.front_trackingfeats
feat2 = o2nevt.back_trackingfeats
if typee == "00":
boxes1 = event.back_trackingboxes
boxes2 = o2nevt.back_trackingboxes
feat1 = event.back_trackingfeats
feat2 = o2nevt.back_trackingfeats
if typee == "01":
boxes1 = event.back_trackingboxes
boxes2 = o2nevt.front_trackingboxes
feat1 = event.back_trackingfeats
feat2 = o2nevt.front_trackingfeats
matrix = 1 - cdist(feat1[0], feat2[0], 'cosine')
simi_mean = np.mean(matrix)
simi_max = np.max(matrix)
simival = simi_calc(event, o2nevt, typee)
if simival is None:
continue
evt_names.append(nname)
evt_barcodes.append(barcode)
evt_similars.append(simi_mean)
evt_similars.append(simival)
evt_types.append(typee)
if len(evt_names)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
and len(evt_similars)==len(evt_types) and len(evt_names)>0:
maxsim = evt_similars[evt_similars.index(max(evt_similars))]
# maxsim = evt_similars[evt_similars.index(max(evt_similars))]
maxsim = max(evt_similars)
for i in range(len(evt_names)):
bcd, simi = evt_barcodes[i], evt_similars[i]
@ -173,16 +186,16 @@ def one2n_pr(evtDicts):
plt.show()
## ============================= 1:n histograms'''
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tpsimi, bins=60, edgecolor='black')
axes[0, 0].hist(tpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 1].hist(fpsimi, bins=60, edgecolor='black')
axes[0, 1].hist(fpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[1, 0].hist(tnsimi, bins=60, edgecolor='black')
axes[1, 0].hist(tnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
axes[1, 1].hist(fnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
plt.show()
@ -192,16 +205,16 @@ def one2n_pr(evtDicts):
def main():
'''1. Build the event dictionary and save it to eventDataPath; only needs to run once '''
# gen_eventdict(sourcePath)
# init_eventdict(eventSourcePath)
'''2. Read the time dictionary '''
'''2. Read the event dictionary '''
evtDicts = read_eventdict(eventDataPath)
'''3. Evaluate the 1:n comparison events '''
fpevents = one2n_pr(evtDicts)
fpErrFile = str(Path(resultPath).joinpath("one2n_Error.txt"))
fpErrFile = str(Path(resultPath).joinpath("one2n_fp_Error.txt"))
with open(fpErrFile, "w") as file:
for item in fpevents:
file.write(item + "\n")
@ -214,8 +227,7 @@ def main():
if __name__ == '__main__':
eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"]
eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result"
eventDataPath = os.path.join(resultPath, "evtobjs")

View File

@ -11,7 +11,7 @@ Created on Fri Aug 30 17:53:03 2024
Extract the standard features and save them to the folder stdFeaturePath;
this can also be done at runtime, based on the barcode intersection with the shopping-event set
2. 1:1 comparison performance test,
func: one2one_eval(similPath)
func: one2one_simi()
(1) Intersect the Barcodes of the shopping events and the standard feature set, and build evtDict and stdDict
(2) Build the scan-A-put-A and scan-A-put-B combinations: mergePairs = AA_list + AB_list
(3) Loop over mergePairs and compute the similarity of each "(A, A)" or "(A, B)" pair;
@ -20,7 +20,7 @@ Created on Fri Aug 30 17:53:03 2024
3. Compute precision, recall, and related metrics
func: compute_precise_recall(pickpath)
func: compute_one2one_pr(pickpath)
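Rough call flow implied by the steps above (these functions are defined later in this file):
    evtList, evtDict, stdDict = build_std_evt_dict()   # step (1): barcode intersection and dictionaries
    rltdata = one2one_simi(evtList, evtDict, stdDict)  # steps (2)-(3): AA/AB pairs and their similarities
    one2one_pr(rltdata)                                # step 3: precision/recall curves and histograms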
@author: ym
@ -33,6 +33,9 @@ import sys
import random
import pickle
import json
import copy
# import torch
import time
# import json
@ -54,6 +57,7 @@ from feat_extract.config import config as conf
from feat_extract.inference import FeatsInterface
from utils.event import ShoppingEvent, save_data
from genfeats import gen_bcd_features
from event_test import calc_simil
@ -175,14 +179,53 @@ def data_precision_compare(stdfeat, evtfeat, evtMessage, save=True):
f.write(line + '\n')
def one2one_simi():
def simi_calc(event, stdfeat):
evtfeat = event.feats_compose
if isinstance(event.feats_select, list):
if len(event.feats_select) and len(event.feats_select[0]):
evtfeat = event.feats_select[0]
else:
return None, None, None
else:
evtfeat = event.feats_select
if len(evtfeat)==0 or len(stdfeat)==0:
return None, None, None
matrix = 1 - cdist(evtfeat, stdfeat, 'cosine')
matrix[matrix < 0] = 0
simi_mean = np.mean(matrix)
simi_max = np.max(matrix)
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
return simi_mean, simi_max, simi_mfeat[0,0]
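# simi_calc() returns three measures of event-vs-standard similarity:
#   simi_mean  - mean of the full cosine-similarity matrix
#   simi_max   - largest single-pair similarity
#   simi_mfeat - similarity between the mean event feature and the mean standard feature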
def build_std_evt_dict():
'''
stdFeaturePath: path of the standard feature set
eventDataPath: path of the saved Event objects
'''
stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and p.suffix=='.pickle']
# stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and p.suffix=='.json']
'''*********** USearch ***********'''
stdFeaturePath = r"D:\contrast\stdlib\v11_test.json"
stdBarcode = []
stdlib = {}
with open(stdFeaturePath, 'r', encoding='utf-8') as f:
data = json.load(f)
for dic in data['total']:
barcode = dic['key']
feature = np.array(dic['value'])
stdBarcode.append(barcode)
stdlib[barcode] = feature
'''======1. List of shopping events whose Barcodes exist in the standard stdBarcode set ==='''
evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventDataPath).iterdir()
if p.is_file()
@ -192,16 +235,21 @@ def one2one_simi():
and p.stem.split('_')[-1] in stdBarcode
]
barcodes = set([bcd for _, bcd in evtList])
'''======2. Build the standard-feature dictionary used for comparison ============='''
# stdDict = {}
# for barcode in barcodes:
# stdpath = os.path.join(stdFeaturePath, barcode+'.json')
# with open(stdpath, 'r', encoding='utf-8') as f:
# stddata = json.load(f)
# feat = np.array(stddata["value"])
# stdDict[barcode] = feat
'''*********** USearch ***********'''
stdDict = {}
for barcode in barcodes:
stdpath = os.path.join(stdFeaturePath, barcode+'.pickle')
with open(stdpath, 'rb') as f:
stddata = pickle.load(f)
stdDict[barcode] = stddata
stdDict[barcode] = stdlib[barcode]
'''======3. Build the operation-event dictionary used for comparison ============='''
evtDict = {}
for evtname, barcode in evtList:
@ -209,21 +257,123 @@ def one2one_simi():
with open(evtpath, 'rb') as f:
evtdata = pickle.load(f)
evtDict[evtname] = evtdata
return evtList, evtDict, stdDict
def one2SN_pr(evtList, evtDict, stdDict):
std_barcodes = set([bcd for _, bcd in evtList])
tp_events, fn_events, fp_events, tn_events = [], [], [], []
tp_simi, fn_simi, tn_simi, fp_simi = [], [], [], []
errorFile_one2SN = []
SN = 9
for evtname, barcode in evtList:
bcd_selected = [barcode]
dset = list(std_barcodes - set([barcode]))
if len(dset) > SN:
random.shuffle(dset)
bcd_selected.extend(dset[:SN])
else:
bcd_selected.extend(dset)
event = evtDict[evtname]
## skip events without any tracks
if len(event.front_feats)+len(event.back_feats)==0:
print(evtname)
continue
barcodes, similars = [], []
for stdbcd in bcd_selected:
stdfeat = stdDict[stdbcd]
# simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
simi_mean = calc_simil(event, stdfeat)
## no guard needed here: events with both front_feats and back_feats empty have already been skipped
# if simi_mean==None:
# continue
barcodes.append(stdbcd)
similars.append(simi_mean)
## no guard needed here
# if len(similars)==0:
# print(evtname)
# continue
max_idx = similars.index(max(similars))
max_sim = similars[max_idx]
for i in range(len(barcodes)):
bcd, simi = barcodes[i], similars[i]
if bcd==barcode and simi==max_sim:
tp_simi.append(simi)
tp_events.append(evtname)
elif bcd==barcode and simi!=max_sim:
fn_simi.append(simi)
fn_events.append(evtname)
elif bcd!=barcode and simi!=max_sim:
tn_simi.append(simi)
tn_events.append(evtname)
elif bcd!=barcode and simi==max_sim and barcode in barcodes:
fp_simi.append(simi)
fp_events.append(evtname)
else:
errorFile_one2SN.append(evtname)
PPreciseX, PRecallX = [], []
NPreciseX, NRecallX = [], []
Thresh = np.linspace(-0.2, 1, 100)
for th in Thresh:
'''Applies to this (Precision, Recall) scheme: several similarities are computed and ranked; a sample counts as TP when its barcode matches and ranks first '''
'''===================================== 1:SN '''
TPX = sum(np.array(tp_simi) >= th)
FPX = sum(np.array(fp_simi) >= th)
FNX = sum(np.array(fn_simi) < th)
TNX = sum(np.array(tn_simi) < th)
PPreciseX.append(TPX/(TPX+FPX+1e-6))
PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
NPreciseX.append(TNX/(TNX+FNX+1e-6))
NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
fig, ax = plt.subplots()
ax.plot(Thresh, PPreciseX, 'r', label='Precise_Pos: TP/TPFP')
ax.plot(Thresh, PRecallX, 'b', label='Recall_Pos: TP/TPFN')
ax.plot(Thresh, NPreciseX, 'g', label='Precise_Neg: TN/TNFN')
ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFP')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('1:SN Precise & Recall')
ax.set_xlabel(f"Event Num: {len(evtList)}")
ax.legend()
plt.show()
## ============================= 1:N exhibition-hall histograms'''
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title(f'TP({len(tp_simi)})')
axes[0, 1].hist(fp_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title(f'FP({len(fp_simi)})')
axes[1, 0].hist(tn_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title(f'TN({len(tn_simi)})')
axes[1, 1].hist(fn_simi, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title(f'FN({len(fn_simi)})')
plt.show()
'''======4.2 Save the standard barcode images =================='''
# for stdbcd in barcodes:
# stdImgpath = stdDict[stdbcd]["imgpaths"]
# pstdpath = os.path.join(subimgPath, f"{stdbcd}")
# if not os.path.exists(pstdpath):
# os.makedirs(pstdpath)
# ii = 1
# for filepath in stdImgpath:
# stdpath = os.path.join(pstdpath, f"{stdbcd}_{ii}.png")
# shutil.copy2(filepath, stdpath)
# ii += 1
'''======5 Build 3 event-pair sets: scan A put A, scan A put B, and the merged set ===================='''
def one2one_simi(evtList, evtDict, stdDict):
barcodes = set([bcd for _, bcd in evtList])
'''======1 Build 3 event-pair sets: scan A put A, scan A put B, and the merged set ===================='''
AA_list = [(evtname, barcode, "same") for evtname, barcode in evtList]
AB_list = []
for evtname, barcode in evtList:
@ -234,45 +384,36 @@ def one2one_simi():
mergePairs = AA_list + AB_list
'''======6 Compute the similarity between events and the standard feature set =================='''
'''======2 Compute the similarity between events and the standard feature set =================='''
rltdata = []
for i in range(len(mergePairs)):
evtname, stdbcd, label = mergePairs[i]
event = evtDict[evtname]
##============================================ float32
stdfeat = stdDict[stdbcd]["feats_ft32"]
evtfeat = event.feats_compose
if len(evtfeat)==0: continue
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
matrix[matrix < 0] = 0
simi_mean = np.mean(matrix)
simi_max = np.max(matrix)
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat[0,0]))
if len(event.feats_compose)==0: continue
stdfeat = stdDict[stdbcd] # float32
simi_mean, simi_max, simi_mfeat = simi_calc(event, stdfeat)
if simi_mean is None:
continue
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat))
'''================ float32 / float16 / int8 precision comparison and storage ============='''
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], save=True)
print("func: one2one_eval(), have finished!")
return rltdata
def compute_precise_recall(rltdata):
def one2one_pr(rltdata):
Same, Cross = [], []
for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
if label == "same":
Same.append(simi_mean)
Same.append(simi_max)
if label == "diff":
Cross.append(simi_mean)
Cross.append(simi_max)
Same = np.array(Same)
Cross = np.array(Cross)
@ -280,11 +421,11 @@ def compute_precise_recall(rltdata):
TNFP = len(Cross)
# fig, axs = plt.subplots(2, 1)
# axs[0].hist(Same, bins=60, edgecolor='black')
# axs[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
# axs[0].set_xlim([-0.2, 1])
# axs[0].set_title(f'Same Barcode, Num: {TPFN}')
# axs[1].hist(Cross, bins=60, edgecolor='black')
# axs[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
# axs[1].set_xlim([-0.2, 1])
# axs[1].set_title(f'Cross Barcode, Num: {TNFP}')
# plt.savefig(f'./result/{file}_hist.png') # svg, png, pdf
@ -324,6 +465,23 @@ def compute_precise_recall(rltdata):
rltpath = os.path.join(similPath, 'pr.png')
plt.savefig(rltpath) # svg, png, pdf
fig, axes = plt.subplots(2,1)
axes[0].hist(Same, bins=60, range=(-0.2, 1), edgecolor='black')
axes[0].set_xlim([-0.2, 1])
axes[0].set_title(f'TP({len(Same)})')
axes[1].hist(Cross, bins=60, range=(-0.2, 1), edgecolor='black')
axes[1].set_xlim([-0.2, 1])
axes[1].set_title(f'TN({len(Cross)})')
rltpath = os.path.join(similPath, 'hist.png')
plt.savefig(rltpath)
plt.show()
def gen_eventdict(sourcePath, saveimg=True):
k, errEvents = 0, []
@ -358,9 +516,7 @@ def gen_eventdict(sourcePath, saveimg=True):
f.write(line + '\n')
def test_one2one():
def init_std_evt_dict():
'''==== 0. Generate the event list and the corresponding Barcode list ==========='''
bcdList, event_spath = [], []
for evtpath in eventSourcePath:
@ -383,10 +539,33 @@ def test_one2one():
print("eventList have generated and saved!")
'''==== 3. 1:1 performance evaluation ==============='''
rltdata = one2one_simi()
compute_precise_recall(rltdata)
def test_one2one():
'''1:1 performance evaluation'''
# 1. Only needs to run once: build the event dictionary and the corresponding standard feature-library dictionary
# init_std_evt_dict()
# 2. Build the event set from the intersection of the event barcodes and the standard-library barcodes
evtList, evtDict, stdDict = build_std_evt_dict()
rltdata = one2one_simi(evtList, evtDict, stdDict)
one2one_pr(rltdata)
def test_one2SN():
'''1:SN performance evaluation'''
# 1. Only needs to run once: build the event dictionary and the corresponding standard feature-library dictionary
# init_std_evt_dict()
# 2. Intersect the event barcodes with the standard-library barcodes
evtList, evtDict, stdDict = build_std_evt_dict()
one2SN_pr(evtList, evtDict, stdDict)
if __name__ == '__main__':
'''
@ -402,7 +581,7 @@ if __name__ == '__main__':
stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
stdBarcodePath = r"D:\exhibition\dataset\bcdpath"
stdFeaturePath = r"D:\exhibition\dataset\feats"
stdFeaturePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\features_json\v11_barcode_11592"
# eventSourcePath = [r'D:\exhibition\images\20241202']
# eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"]
@ -419,6 +598,8 @@ if __name__ == '__main__':
os.makedirs(similPath)
test_one2one()
# test_one2SN()

View File

@ -107,9 +107,7 @@ def test_compare():
def contrast_pr(paths):
'''
1:1
'''
'''
paths = Path(paths)
evtpaths = []
@ -117,25 +115,19 @@ def contrast_pr(paths):
condt1 = p.is_dir()
condt2 = len(p.name.split('_'))>=2
condt3 = len(p.name.split('_')[-1])>8
condt4 = p.name.split('_')[-1].isdigit()
condt4 = p.name.split('_')[-1].isdigit()
if condt1 and condt2 and condt3 and condt4:
evtpaths.append(p)
# evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2 and len(p.name.split('_')[-1])>8]
# evtpaths = [p for p in paths.iterdir() if p.is_dir()]
events, similars = [], []
##===================================== scan-A-put-A and scan-A-put-B scenarios
one2oneAA, one2oneAB = [], []
one2SNAA, one2SNAB = [], []
##===================================== applies to 1:1
_tp_events, _fn_events, _fp_events, _tn_events = [], [], [], []
_tp_simi, _fn_simi, _tn_simi, _fp_simi = [], [], [], []
@ -406,9 +398,12 @@ def contrast_pr(paths):
axes[0].hist(np.array(one2SNAA), bins=60, edgecolor='black')
axes[0].set_xlim([-0.2, 1])
axes[0].set_title('AA')
axes[0].set_xlabel(f"Event Num: {len(one2SNAA)}")
axes[1].hist(np.array(one2SNAB), bins=60, edgecolor='black')
axes[1].set_xlim([-0.2, 1])
axes[1].set_title('BB')
axes[1].set_xlabel(f"Event Num: {len(one2SNAB)}")
plt.show()
'''3. ============================= 1:SN curves'''
@ -428,16 +423,16 @@ def contrast_pr(paths):
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tp_simi, bins=60, edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 0].set_title(f'TP({len(tp_simi)})')
axes[0, 1].hist(fp_simi, bins=60, edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[0, 1].set_title(f'FP({len(fp_simi)})')
axes[1, 0].hist(tn_simi, bins=60, edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 0].set_title(f'TN({len(tn_simi)})')
axes[1, 1].hist(fn_simi, bins=60, edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
axes[1, 1].set_title(f'FN({len(fn_simi)})')
plt.show()
@ -458,16 +453,16 @@ def contrast_pr(paths):
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tpsimi, bins=60, edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 0].set_title(f'TP({len(tpsimi)})')
axes[0, 1].hist(fpsimi, bins=60, edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[0, 1].set_title(f'FP({len(fpsimi)})')
axes[1, 0].hist(tnsimi, bins=60, edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 0].set_title(f'TN({len(tnsimi)})')
axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
axes[1, 1].set_title(f'FN({len(fnsimi)})')
plt.show()
@ -500,7 +495,7 @@ def contrast_pr(paths):
if __name__ == "__main__":
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
evtpaths = r"D:\全实时\source_data\2024122416"
contrast_pr(evtpaths)

contrast/select_subimgs.py Normal file (117 lines added)
View File

@ -0,0 +1,117 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 13:58:13 2024
written for selecting standard sub-images for Wuhuaqi
@author: ym
"""
import os
import time
# import torch
import pickle
# import json
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
from feat_extract.config import config as conf
# from model import resnet18 as resnet18
from feat_extract.inference import FeatsInterface #, inference_image
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
def gen_features(imgpath):
Encoder = FeatsInterface(conf)
imgs, imgnames = [], []
for filename in os.listdir(imgpath):
file, ext = os.path.splitext(filename)
if ext not in IMG_FORMAT: continue
fpath = os.path.join(imgpath, filename)
img = Image.open(fpath)
imgs.append(img)
filelist = file.split("_")
newname = "_".join([filelist[0],filelist[1], filelist[2], filelist[-3], filelist[-2], filelist[-1]])
# imgnames.append(newname)
imgnames.append(file)
features = Encoder.inference(imgs)
features /= np.linalg.norm(features, axis=1)[:, None]
return features, imgnames
def top_p_percent_indices(matrix, p):
"""
Finds the indices of the top p% largest elements in a 2D matrix.
Args:
matrix (np.ndarray): A 2D NumPy array.
p: int, 0-100
Returns:
List[Tuple[int, int]]: A list of (row, column) indices for the top p% largest elements.
"""
# Flatten the matrix
flat_matrix = matrix.flatten()
# Calculate the threshold index for the top p%
num_elements = len(flat_matrix)
threshold_index = int(num_elements * 0.01 * p) # top p%
threshold_index = max(1, threshold_index) # Ensure at least one element is considered
threshold_value = np.partition(flat_matrix, -threshold_index)[-threshold_index]
# Create a mask for elements >= threshold
mask = matrix >= threshold_value
# Get the indices of elements that satisfy the mask
indices = np.argwhere(mask)
return list(map(tuple, indices))
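# Usage sketch (illustration only):
#   m = np.array([[0.1, 0.9], [0.8, 0.2]])
#   top_p_percent_indices(m, 50)   # -> [(0, 1), (1, 0)], the two largest of the four entries
# Passing -1 * matrix (as done in main() below) yields the indices of the smallest p% instead.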
def main():
imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase\6923555210479"
feats, imgnames = gen_features(imgpath)
n = len(feats)
matrix = 1 - cdist(feats, feats, 'cosine')
nmatrix = np.array([[matrix[i][j] for j in range(n) if i != j] for i in range(n)])
top_p_large_index = top_p_percent_indices(nmatrix, 1)
top_p_small_index = top_p_percent_indices(-1*nmatrix, 1)
simi_mean = np.mean(nmatrix, axis=1)
max_simi = np.max(nmatrix)
max_index = np.where(nmatrix==max_simi)
min_simi = np.min(nmatrix)
min_index = np.where(nmatrix==min_simi)
fig, ax = plt.subplots()
simils = [matrix[i][j] for j in range(n) for i in range(n) if j>i]
ax.hist(simils, bins=60, range=(-0.2, 1), edgecolor='black')
ax.set_xlim([-0.2, 1])
ax.set_title("Similarity")
print("done!")
if __name__ == '__main__':
main()

View File

@ -15,6 +15,7 @@ sys.path.append(r"D:\DetectTracking")
from tracking.utils.plotting import Annotator, colors
from tracking.utils.drawtracks import drawTrack
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
VID_FORMAT = ['.mp4', '.avi']
@ -117,10 +118,12 @@ class ShoppingEvent:
if stype=="data":
self.from_datafile(eventpath)
if stype=="realtime":
self.from_realtime_datafile(eventpath)
if stype=="source":
self.from_source_pkl(eventpath)
self.feats_select = []
self.feats_select = np.empty((0, 256), dtype=np.float64)
self.feats_compose = np.empty((0, 256), dtype=np.float64)
self.select_feats()
self.compose_feats()
@ -289,6 +292,66 @@ class ShoppingEvent:
elif CamerType == '1':
self.front_boxes = tracking_output_boxes
self.front_feats = tracking_output_feats
def from_realtime_datafile(self, eventpath):
# evtList = self.evtname.split('_')
# if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
# self.barcode = evtList[-1]
# if len(evtList)==3 and evtList[-1]== evtList[-2]:
# self.evtType = 'input'
# else:
# self.evtType = 'other'
'''================ path of video ============='''
for vidname in os.listdir(eventpath):
name, ext = os.path.splitext(vidname)
if ext not in VID_FORMAT: continue
vidpath = os.path.join(eventpath, vidname)
CamerType = name.split('_')[0]
if CamerType == '0':
self.back_videopath = vidpath
if CamerType == '1':
self.front_videopath = vidpath
'''================ process.data ============='''
procpath = Path(eventpath).joinpath('process.data')
if procpath.is_file():
SimiDict = read_similar(procpath)
self.one2one = SimiDict['one2one']
self.one2n = SimiDict['one2n']
self.one2SN = SimiDict['one2SN']
'''=========== 0/1_track.data & 0/1_tracking_output.data ======='''
for dataname in os.listdir(eventpath):
datapath = os.path.join(eventpath, dataname)
if not os.path.isfile(datapath): continue
CamerType = dataname.split('_')[0]
'''========== 0/1_track.data =========='''
if dataname.find("_track.data")>0:
trackerboxes, trackerfeats = extract_data_realtime(datapath)
if CamerType == '0':
self.back_trackerboxes = trackerboxes
self.back_trackerfeats = trackerfeats
if CamerType == '1':
self.front_trackerboxes = trackerboxes
self.front_trackerfeats = trackerfeats
'''========== 0/1_tracking_output.data =========='''
if dataname.find("_tracking_output.data")>0:
trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(datapath)
if CamerType == '0':
self.back_trackingboxes = trackingboxes
self.back_trackingfeats = trackingfeats
self.back_boxes = tracking_outboxes
self.back_feats = tracking_outfeats
elif CamerType == '1':
self.front_trackingboxes = trackingboxes
self.front_trackingfeats = trackingfeats
self.front_boxes = tracking_outboxes
self.front_feats = tracking_outfeats
@ -305,11 +368,10 @@ class ShoppingEvent:
def select_feats(self):
'''Feature selection for the event'''
self.feats_select = []
if len(self.front_feats):
self.feats_select = self.front_feats
self.feats_select = self.front_feats[0]
elif len(self.back_feats):
self.feats_select = self.back_feats
self.feats_select = self.back_feats[0]
def plot_save_image(self, savepath):