modify the 1:1 comparison method
@@ -62,7 +62,7 @@ class Config:
    # test_val = "./data/test_data_100"

    # test_model = "checkpoints/best_resnet18_v11.pth"
-   test_model = "checkpoints/zhanting_cls22_v11.pth"
+   test_model = "checkpoints/zhanting_res_801.pth"


@@ -136,6 +136,8 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
            continue

        featpath = os.path.join(featPath, f"{bcd}.pickle")
+       # if os.path.isfile(featpath):
+       #     continue

        stdbDict = {}
        t1 = time.time()
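The two commented-out lines added above sketch a resume option for stdfeat_infer(): skip any barcode whose feature pickle already exists. A minimal sketch of that per-barcode loop, with a hypothetical extract_feats() standing in for the real model inference:

    import os
    import pickle
    import time

    def stdfeat_infer_sketch(imgPath, featPath, bcdSet=None, skip_existing=False):
        """Per-barcode feature extraction; extract_feats() is a placeholder for the real model."""
        os.makedirs(featPath, exist_ok=True)
        for bcd in os.listdir(imgPath):
            if bcdSet is not None and bcd not in bcdSet:
                continue
            featpath = os.path.join(featPath, f"{bcd}.pickle")
            if skip_existing and os.path.isfile(featpath):
                continue                      # resume: this barcode was already processed
            stdbDict = {"barcode": bcd}
            t1 = time.time()
            stdbDict["feats"] = extract_feats(os.path.join(imgPath, bcd))   # hypothetical helper
            stdbDict["seconds"] = time.time() - t1
            with open(featpath, 'wb') as f:
                pickle.dump(stdbDict, f)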
@@ -11,7 +11,7 @@ Created on Fri Aug 30 17:53:03 2024
   标准特征提取,并保存至文件夹 stdFeaturePath 中,
   也可在运行过程中根据与购物事件集合 barcodes 交集执行
2. 1:1 比对性能测试,
-  func: one2one_eval(resultPath)
+  func: one2one_eval(similPath)
   (1) 求购物事件和标准特征级 Barcode 交集,构造 evtDict、stdDict
   (2) 构造扫 A 放 A、扫 A 放 B 组合,mergePairs = AA_list + AB_list
   (3) 循环计算 mergePairs 中元素 "(A, A) 或 (A, B)" 相似度;
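Step (2) of the docstring builds the "scan A, put A" and "scan A, put B" pairs that drive the 1:1 test. A minimal sketch of that pairing, assuming evtList holds (event_name, barcode) tuples and barcodes is the set of standard barcodes, as in the code later in this file:

    import random

    def build_merge_pairs(evtList, barcodes):
        """Return AA_list + AB_list: (event_name, barcode, 'same'/'diff') triples."""
        AA_list = [(evt, bcd, "same") for evt, bcd in evtList]        # scan A, put A
        AB_list = []
        for evt, bcd in evtList:
            others = list(barcodes - {bcd})                           # every other standard barcode
            if others:
                AB_list.append((evt, random.choice(others), "diff"))  # scan A, put B
        return AA_list + AB_list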
@ -32,6 +32,7 @@ import os
|
|||||||
import sys
|
import sys
|
||||||
import random
|
import random
|
||||||
import pickle
|
import pickle
|
||||||
|
import json
|
||||||
# import torch
|
# import torch
|
||||||
import time
|
import time
|
||||||
# import json
|
# import json
|
||||||
@@ -47,10 +48,12 @@ from datetime import datetime
# from feat_inference import inference_image

sys.path.append(r"D:\DetectTracking")
-from tracking.utils.read_data import extract_data, read_tracking_output, read_one2one_simi, read_deletedBarcode_file
+from tracking.utils.read_data import extract_data, read_tracking_output, read_similar, read_deletedBarcode_file
+from tracking.utils.plotting import Annotator, colors
-from config import config as conf
+from feat_extract.config import config as conf
-from genfeats import model_init, genfeatures, stdfeat_infer
+from feat_extract.inference import FeatsInterface
+from utils.event import Event
+from genfeats import gen_bcd_features

IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']

@@ -107,6 +110,10 @@ def creat_shopping_event(eventPath):
        evtType = 'other'

    '''================ 1. 构造事件描述字典,暂定 9 items ==============='''
+
+
+
+
    event = {}
    event['barcode'] = barcode
    event['type'] = evtType
@@ -118,7 +125,8 @@ def creat_shopping_event(eventPath):
    event['back_feats'] = np.empty((0, 256), dtype=np.float64)
    event['front_feats'] = np.empty((0, 256), dtype=np.float64)
    event['feats_compose'] = np.empty((0, 256), dtype=np.float64)
-   event['one2one_simi'] = None
+   event['one2one'] = None
+   event['one2n'] = None
    event['feats_select'] = np.empty((0, 256), dtype=np.float64)


@@ -145,8 +153,9 @@ def creat_shopping_event(eventPath):
        event['front_feats'] = tracking_output_feats

        if dataname.find("process.data")==0:
-           simiDict = read_one2one_simi(datapath)
-           event['one2one_simi'] = simiDict
+           simiDict = read_similar(datapath)
+           event['one2one'] = simiDict['one2one']
+           event['one2n'] = simiDict['one2n']


    if len(event['back_boxes'])==0 or len(event['front_boxes'])==0:
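The event dictionary now keeps the device-side similarity results under separate 'one2one' and 'one2n' keys. A small sketch of that parsing step, assuming read_similar() returns a dict with those two keys, as the calling code implies:

    from pathlib import Path

    def attach_similarity(event, eventpath):
        """Fill event['one2one'] / event['one2n'] from process.data when the file exists."""
        datapath = Path(eventpath) / "process.data"
        if datapath.is_file():
            simiDict = read_similar(datapath)          # project reader; assumed importable
            event['one2one'] = simiDict['one2one']
            event['one2n'] = simiDict['one2n']
        return event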
@ -215,6 +224,52 @@ def creat_shopping_event(eventPath):
|
|||||||
return event
|
return event
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def plot_save_image(event, savepath):
|
||||||
|
cameras = ('front', 'back')
|
||||||
|
for camera in cameras:
|
||||||
|
if camera == 'front':
|
||||||
|
boxes = event.front_trackerboxes
|
||||||
|
imgpaths = event.front_imgpaths
|
||||||
|
else:
|
||||||
|
boxes = event.back_trackerboxes
|
||||||
|
imgpaths = event.back_imgpaths
|
||||||
|
|
||||||
|
def array2list(bboxes):
|
||||||
|
'''[x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]'''
|
||||||
|
frame_ids = bboxes[:, 7].astype(int)
|
||||||
|
fID = np.unique(bboxes[:, 7].astype(int))
|
||||||
|
fboxes = []
|
||||||
|
for f_id in fID:
|
||||||
|
idx = np.where(frame_ids==f_id)[0]
|
||||||
|
box = bboxes[idx, :]
|
||||||
|
fboxes.append((f_id, box))
|
||||||
|
return fboxes
|
||||||
|
|
||||||
|
fboxes = array2list(boxes)
|
||||||
|
|
||||||
|
for fid, fbox in fboxes:
|
||||||
|
imgpath = imgpaths[int(fid-1)]
|
||||||
|
|
||||||
|
image = cv2.imread(imgpath)
|
||||||
|
|
||||||
|
annotator = Annotator(image.copy(), line_width=2)
|
||||||
|
for i, (*xyxy, tid, score, cls, fid, bid) in enumerate(fbox):
|
||||||
|
label = f'{int(tid), int(cls)}'
|
||||||
|
if tid >=0 and cls==0:
|
||||||
|
color = colors(int(cls), True)
|
||||||
|
elif tid >=0 and cls!=0:
|
||||||
|
color = colors(int(tid), True)
|
||||||
|
else:
|
||||||
|
color = colors(19, True) # 19为调色板的最后一个元素
|
||||||
|
annotator.box_label(xyxy, label, color=color)
|
||||||
|
|
||||||
|
im0 = annotator.result()
|
||||||
|
spath = os.path.join(savepath, Path(imgpath).name)
|
||||||
|
cv2.imwrite(spath, im0)
|
||||||
|
|
||||||
|
|
||||||
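The inner helper array2list() in plot_save_image() groups tracker boxes by their frame index before drawing. The same grouping can be written compactly; a sketch assuming the 9-column box layout [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]:

    import numpy as np

    def group_boxes_by_frame(bboxes):
        """Return [(frame_id, boxes_of_that_frame), ...] sorted by frame_id."""
        frame_ids = bboxes[:, 7].astype(int)
        return [(fid, bboxes[frame_ids == fid]) for fid in np.unique(frame_ids)]

    # for fid, fbox in group_boxes_by_frame(event.front_trackerboxes):
    #     ...annotate imgpaths[int(fid) - 1] with the rows of fbox...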
def save_event_subimg(event, savepath):
|
def save_event_subimg(event, savepath):
|
||||||
'''
|
'''
|
||||||
功能: 保存一次购物事件的轨迹子图
|
功能: 保存一次购物事件的轨迹子图
|
||||||
@ -224,160 +279,92 @@ def save_event_subimg(event, savepath):
|
|||||||
子图保存次序:先前摄、后后摄,以 k 为编号,和 "feats_compose" 中次序相同
|
子图保存次序:先前摄、后后摄,以 k 为编号,和 "feats_compose" 中次序相同
|
||||||
'''
|
'''
|
||||||
cameras = ('front', 'back')
|
cameras = ('front', 'back')
|
||||||
k = 0
|
|
||||||
for camera in cameras:
|
for camera in cameras:
|
||||||
if camera == 'front':
|
if camera == 'front':
|
||||||
boxes = event['front_boxes']
|
boxes = event.front_boxes
|
||||||
imgpaths = event['front_imgpaths']
|
imgpaths = event.front_imgpaths
|
||||||
else:
|
else:
|
||||||
boxes = event['back_boxes']
|
boxes = event.back_boxes
|
||||||
imgpaths = event['back_imgpaths']
|
imgpaths = event.back_imgpaths
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
for i, box in enumerate(boxes):
|
for i, box in enumerate(boxes):
|
||||||
x1, y1, x2, y2, tid, score, cls, fid, bid = box
|
x1, y1, x2, y2, tid, score, cls, fid, bid = box
|
||||||
|
|
||||||
imgpath = imgpaths[i]
|
imgpath = imgpaths[int(fid-1)]
|
||||||
image = cv2.imread(imgpath)
|
image = cv2.imread(imgpath)
|
||||||
|
|
||||||
subimg = image[int(y1/2):int(y2/2), int(x1/2):int(x2/2), :]
|
subimg = image[int(y1/2):int(y2/2), int(x1/2):int(x2/2), :]
|
||||||
|
|
||||||
camerType, timeTamp, _, frameID = os.path.basename(imgpath).split('.')[0].split('_')
|
camerType, timeTamp, _, frameID = os.path.basename(imgpath).split('.')[0].split('_')
|
||||||
subimgName = f"{k}_cam-{camerType}_tid-{int(tid)}_fid-({int(fid)}, {frameID}).png"
|
subimgName = f"cam{camerType}_{i}_tid{int(tid)}_fid({int(fid)}, {frameID}).png"
|
||||||
spath = os.path.join(savepath, subimgName)
|
spath = os.path.join(savepath, subimgName)
|
||||||
|
|
||||||
cv2.imwrite(spath, subimg)
|
cv2.imwrite(spath, subimg)
|
||||||
k += 1
|
|
||||||
# basename = os.path.basename(event['filepath'])
|
# basename = os.path.basename(event['filepath'])
|
||||||
print(f"Image saved: {os.path.basename(event['filepath'])}")
|
print(f"Image saved: {os.path.basename(event.eventpath)}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def one2one_eval(resultPath):
|
|
||||||
|
|
||||||
# stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and p.suffix=='.pickle']
|
|
||||||
stdBarcode = [p.stem for p in Path(stdBarcodePath).iterdir() if p.is_file() and p.suffix=='.pickle']
|
|
||||||
|
|
||||||
|
|
||||||
'''购物事件列表,该列表中的 Barcode 存在于标准的 stdBarcode 内'''
|
|
||||||
evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventFeatPath).iterdir()
|
|
||||||
if p.is_file()
|
|
||||||
and p.suffix=='.pickle'
|
|
||||||
and (len(p.stem.split('_'))==2 or len(p.stem.split('_'))==3)
|
|
||||||
and p.stem.split('_')[-1].isdigit()
|
|
||||||
and p.stem.split('_')[-1] in stdBarcode
|
|
||||||
]
|
|
||||||
|
|
||||||
barcodes = set([bcd for _, bcd in evtList])
|
|
||||||
|
|
||||||
'''标准特征集图像样本经特征提取并保存,运行一次后无需再运行'''
|
|
||||||
stdfeat_infer(stdBarcodePath, stdFeaturePath, barcodes)
|
|
||||||
|
|
||||||
'''========= 构建用于比对的标准特征字典 ============='''
|
|
||||||
stdDict = {}
|
|
||||||
for barcode in barcodes:
|
|
||||||
stdpath = os.path.join(stdFeaturePath, barcode+'.pickle')
|
|
||||||
with open(stdpath, 'rb') as f:
|
|
||||||
stddata = pickle.load(f)
|
|
||||||
stdDict[barcode] = stddata
|
|
||||||
|
|
||||||
'''========= 构建用于比对的操作事件字典 ============='''
|
|
||||||
evtDict = {}
|
|
||||||
for event, barcode in evtList:
|
|
||||||
evtpath = os.path.join(eventFeatPath, event+'.pickle')
|
|
||||||
with open(evtpath, 'rb') as f:
|
|
||||||
evtdata = pickle.load(f)
|
|
||||||
evtDict[event] = evtdata
|
|
||||||
|
|
||||||
|
|
||||||
'''===== 构造 3 个事件对: 扫 A 放 A, 扫 A 放 B, 合并 ===================='''
|
|
||||||
AA_list = [(event, barcode, "same") for event, barcode in evtList]
|
|
||||||
AB_list = []
|
|
||||||
for event, barcode in evtList:
|
|
||||||
dset = list(barcodes.symmetric_difference(set([barcode])))
|
|
||||||
idx = random.randint(0, len(dset)-1)
|
|
||||||
AB_list.append((event, dset[idx], "diff"))
|
|
||||||
|
|
||||||
mergePairs = AA_list + AB_list
|
|
||||||
|
|
||||||
'''读取事件、标准特征文件中数据,以 AA_list 和 AB_list 中关键字为 key 生成字典'''
|
|
||||||
rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
|
|
||||||
for evt, stdbcd, label in mergePairs:
|
|
||||||
event = evtDict[evt]
|
|
||||||
|
|
||||||
## 判断是否存在轨迹图像文件夹,不存在则创建文件夹并保存轨迹图像
|
def data_precision_compare(stdfeat, evtfeat, evtMessage, save=True):
|
||||||
pairpath = os.path.join(subimgPath, f"{evt}")
|
evt, stdbcd, label = evtMessage
|
||||||
if not os.path.exists(pairpath):
|
rltdata, rltdata_ft16, rltdata_ft16_ = [], [], []
|
||||||
os.makedirs(pairpath)
|
|
||||||
save_event_subimg(event, pairpath)
|
|
||||||
|
|
||||||
## 判断是否存在 barcode 标准样本集图像文件夹,不存在则创建文件夹并存储 barcode 样本集图像
|
|
||||||
stdImgpath = stdDict[stdbcd]["imgpaths"]
|
|
||||||
pstdpath = os.path.join(subimgPath, f"{stdbcd}")
|
|
||||||
if not os.path.exists(pstdpath):
|
|
||||||
os.makedirs(pstdpath)
|
|
||||||
ii = 1
|
|
||||||
for filepath in stdImgpath:
|
|
||||||
stdpath = os.path.join(pstdpath, f"{stdbcd}_{ii}.png")
|
|
||||||
shutil.copy2(filepath, stdpath)
|
|
||||||
ii += 1
|
|
||||||
|
|
||||||
##============================================ float32
|
|
||||||
stdfeat = stdDict[stdbcd]["feats"]
|
|
||||||
evtfeat = event["feats_compose"]
|
|
||||||
|
|
||||||
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
|
||||||
simi_mean = np.mean(matrix)
|
|
||||||
simi_max = np.max(matrix)
|
|
||||||
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
|
||||||
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
|
||||||
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
|
||||||
rltdata.append((label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]))
|
|
||||||
|
|
||||||
|
|
||||||
##============================================ float16
|
|
||||||
stdfeat_ft16 = stdfeat.astype(np.float16)
|
|
||||||
evtfeat_ft16 = evtfeat.astype(np.float16)
|
|
||||||
stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
|
|
||||||
evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
|
|
||||||
|
|
||||||
|
|
||||||
matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
|
|
||||||
simi_mean_ft16 = np.mean(matrix_ft16)
|
|
||||||
simi_max_ft16 = np.max(matrix_ft16)
|
|
||||||
stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
|
|
||||||
evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
|
|
||||||
simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
|
|
||||||
rltdata_ft16.append((label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]))
|
|
||||||
|
|
||||||
'''****************** uint8 is ok!!!!!! ******************'''
|
|
||||||
##============================================ uint8
|
|
||||||
# stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
|
|
||||||
# evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
|
|
||||||
|
|
||||||
stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
|
|
||||||
evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
|
|
||||||
stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
|
|
||||||
evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
|
|
||||||
|
|
||||||
|
|
||||||
absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
|
|
||||||
|
|
||||||
matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
|
|
||||||
simi_mean_ft16_ = np.mean(matrix_ft16_)
|
|
||||||
simi_max_ft16_ = np.max(matrix_ft16_)
|
|
||||||
stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
|
|
||||||
evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
|
|
||||||
simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
|
|
||||||
rltdata_ft16_.append((label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]))
|
|
||||||
|
|
||||||
|
|
||||||
|
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
||||||
|
simi_mean = np.mean(matrix)
|
||||||
|
simi_max = np.max(matrix)
|
||||||
|
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||||
|
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||||
|
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||||
|
rltdata = [label, stdbcd, evt, simi_mean, simi_max, simi_mfeat[0,0]]
|
||||||
|
|
||||||
tm = datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S')
|
|
||||||
##================================================ save as float32,
|
##================================================================= float16
|
||||||
rppath = os.path.join(resultPath, f'{tm}.pickle')
|
stdfeat_ft16 = stdfeat.astype(np.float16)
|
||||||
|
evtfeat_ft16 = evtfeat.astype(np.float16)
|
||||||
|
stdfeat_ft16 /= np.linalg.norm(stdfeat_ft16, axis=1)[:, None]
|
||||||
|
evtfeat_ft16 /= np.linalg.norm(evtfeat_ft16, axis=1)[:, None]
|
||||||
|
|
||||||
|
|
||||||
|
matrix_ft16 = 1 - cdist(stdfeat_ft16, evtfeat_ft16, 'cosine')
|
||||||
|
simi_mean_ft16 = np.mean(matrix_ft16)
|
||||||
|
simi_max_ft16 = np.max(matrix_ft16)
|
||||||
|
stdfeatm_ft16 = np.mean(stdfeat_ft16, axis=0, keepdims=True)
|
||||||
|
evtfeatm_ft16 = np.mean(evtfeat_ft16, axis=0, keepdims=True)
|
||||||
|
simi_mfeat_ft16 = 1- np.maximum(0.0, cdist(stdfeatm_ft16, evtfeatm_ft16, 'cosine'))
|
||||||
|
rltdata_ft16 = [label, stdbcd, evt, simi_mean_ft16, simi_max_ft16, simi_mfeat_ft16[0,0]]
|
||||||
|
|
||||||
|
'''****************** uint8 is ok!!!!!! ******************'''
|
||||||
|
##=================================================================== uint8
|
||||||
|
# stdfeat_uint8, stdfeat_ft16_ = ft16_to_uint8(stdfeat_ft16)
|
||||||
|
# evtfeat_uint8, evtfeat_ft16_ = ft16_to_uint8(evtfeat_ft16)
|
||||||
|
|
||||||
|
stdfeat_uint8 = (stdfeat_ft16*128).astype(np.int8)
|
||||||
|
evtfeat_uint8 = (evtfeat_ft16*128).astype(np.int8)
|
||||||
|
stdfeat_ft16_ = stdfeat_uint8.astype(np.float16)/128
|
||||||
|
evtfeat_ft16_ = evtfeat_uint8.astype(np.float16)/128
|
||||||
|
|
||||||
|
absdiff = np.linalg.norm(stdfeat_ft16_ - stdfeat) / stdfeat.size
|
||||||
|
|
||||||
|
matrix_ft16_ = 1 - cdist(stdfeat_ft16_, evtfeat_ft16_, 'cosine')
|
||||||
|
simi_mean_ft16_ = np.mean(matrix_ft16_)
|
||||||
|
simi_max_ft16_ = np.max(matrix_ft16_)
|
||||||
|
stdfeatm_ft16_ = np.mean(stdfeat_ft16_, axis=0, keepdims=True)
|
||||||
|
evtfeatm_ft16_ = np.mean(evtfeat_ft16_, axis=0, keepdims=True)
|
||||||
|
simi_mfeat_ft16_ = 1- np.maximum(0.0, cdist(stdfeatm_ft16_, evtfeatm_ft16_, 'cosine'))
|
||||||
|
rltdata_ft16_ = [label, stdbcd, evt, simi_mean_ft16_, simi_max_ft16_, simi_mfeat_ft16_[0,0]]
|
||||||
|
|
||||||
|
if not save:
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
##========================================================= save as float32
|
||||||
|
rppath = os.path.join(similPath, f'{evt}_ft32.pickle')
|
||||||
with open(rppath, 'wb') as f:
|
with open(rppath, 'wb') as f:
|
||||||
pickle.dump(rltdata, f)
|
pickle.dump(rltdata, f)
|
||||||
|
|
||||||
rtpath = os.path.join(resultPath, f'{tm}.txt')
|
rtpath = os.path.join(similPath, f'{evt}_ft32.txt')
|
||||||
with open(rtpath, 'w', encoding='utf-8') as f:
|
with open(rtpath, 'w', encoding='utf-8') as f:
|
||||||
for result in rltdata:
|
for result in rltdata:
|
||||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||||
@ -385,12 +372,12 @@ def one2one_eval(resultPath):
|
|||||||
f.write(line + '\n')
|
f.write(line + '\n')
|
||||||
|
|
||||||
|
|
||||||
##================================================ save as float16,
|
##========================================================= save as float16
|
||||||
rppath_ft16 = os.path.join(resultPath, f'{tm}_ft16.pickle')
|
rppath_ft16 = os.path.join(similPath, f'{evt}_ft16.pickle')
|
||||||
with open(rppath_ft16, 'wb') as f:
|
with open(rppath_ft16, 'wb') as f:
|
||||||
pickle.dump(rltdata_ft16, f)
|
pickle.dump(rltdata_ft16, f)
|
||||||
|
|
||||||
rtpath_ft16 = os.path.join(resultPath, f'{tm}_ft16.txt')
|
rtpath_ft16 = os.path.join(similPath, f'{evt}_ft16.txt')
|
||||||
with open(rtpath_ft16, 'w', encoding='utf-8') as f:
|
with open(rtpath_ft16, 'w', encoding='utf-8') as f:
|
||||||
for result in rltdata_ft16:
|
for result in rltdata_ft16:
|
||||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||||
@ -398,42 +385,145 @@ def one2one_eval(resultPath):
|
|||||||
f.write(line + '\n')
|
f.write(line + '\n')
|
||||||
|
|
||||||
|
|
||||||
##================================================ save as uint8,
|
##=========================================================== save as uint8
|
||||||
rppath_uint8 = os.path.join(resultPath, f'{tm}_uint8.pickle')
|
rppath_uint8 = os.path.join(similPath, f'{evt}_uint8.pickle')
|
||||||
with open(rppath_uint8, 'wb') as f:
|
with open(rppath_uint8, 'wb') as f:
|
||||||
pickle.dump(rltdata_ft16_, f)
|
pickle.dump(rltdata_ft16_, f)
|
||||||
|
|
||||||
rtpath_uint8 = os.path.join(resultPath, f'{tm}_uint8.txt')
|
rtpath_uint8 = os.path.join(similPath, f'{evt}_uint8.txt')
|
||||||
with open(rtpath_uint8, 'w', encoding='utf-8') as f:
|
with open(rtpath_uint8, 'w', encoding='utf-8') as f:
|
||||||
for result in rltdata_ft16_:
|
for result in rltdata_ft16_:
|
||||||
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
part = [f"{x:.3f}" if isinstance(x, float) else str(x) for x in result]
|
||||||
line = ', '.join(part)
|
line = ', '.join(part)
|
||||||
f.write(line + '\n')
|
f.write(line + '\n')
|
||||||
|
|
||||||
|
|
||||||
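data_precision_compare() above re-runs the same similarity statistics after casting the L2-normalized features to float16 and then to int8 with a scale of 128. A self-contained sketch of that quantization round-trip and of the drift it introduces in the cosine-similarity matrix:

    import numpy as np
    from scipy.spatial.distance import cdist

    def quantization_drift(stdfeat, evtfeat):
        """Worst-case change of the cosine-similarity matrix after an int8 round-trip."""
        simi32 = 1 - cdist(stdfeat, evtfeat, 'cosine')

        std16 = stdfeat.astype(np.float16)
        evt16 = evtfeat.astype(np.float16)
        std16 /= np.linalg.norm(std16, axis=1, keepdims=True)
        evt16 /= np.linalg.norm(evt16, axis=1, keepdims=True)

        std8 = (std16 * 128).astype(np.int8)           # quantize (scale 128, as in the code above)
        evt8 = (evt16 * 128).astype(np.int8)
        std_deq = std8.astype(np.float16) / 128        # dequantize
        evt_deq = evt8.astype(np.float16) / 128

        simi8 = 1 - cdist(std_deq, evt_deq, 'cosine')
        return np.abs(simi32 - simi8).max()

    # quantization_drift(np.random.randn(10, 256), np.random.randn(5, 256))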
|
def one2one_simi():
|
||||||
|
'''
|
||||||
|
stdFeaturePath: 标准特征集地址
|
||||||
|
eventDataPath: Event对象地址
|
||||||
|
'''
|
||||||
|
|
||||||
|
stdBarcode = [p.stem for p in Path(stdFeaturePath).iterdir() if p.is_file() and p.suffix=='.pickle']
|
||||||
|
|
||||||
|
'''======1. 购物事件列表,该列表中的 Barcode 存在于标准的 stdBarcode 内 ==='''
|
||||||
|
evtList = [(p.stem, p.stem.split('_')[-1]) for p in Path(eventDataPath).iterdir()
|
||||||
|
if p.is_file()
|
||||||
|
and p.suffix=='.pickle'
|
||||||
|
and (len(p.stem.split('_'))==2 or len(p.stem.split('_'))==3)
|
||||||
|
and p.stem.split('_')[-1].isdigit()
|
||||||
|
and p.stem.split('_')[-1] in stdBarcode
|
||||||
|
]
|
||||||
|
barcodes = set([bcd for _, bcd in evtList])
|
||||||
|
|
||||||
|
'''======2. 构建用于比对的标准特征字典 ============='''
|
||||||
|
stdDict = {}
|
||||||
|
for barcode in barcodes:
|
||||||
|
stdpath = os.path.join(stdFeaturePath, barcode+'.pickle')
|
||||||
|
with open(stdpath, 'rb') as f:
|
||||||
|
stddata = pickle.load(f)
|
||||||
|
stdDict[barcode] = stddata
|
||||||
|
|
||||||
|
|
||||||
|
'''======3. 构建用于比对的操作事件字典 ============='''
|
||||||
|
evtDict = {}
|
||||||
|
for evtname, barcode in evtList:
|
||||||
|
evtpath = os.path.join(eventDataPath, evtname+'.pickle')
|
||||||
|
with open(evtpath, 'rb') as f:
|
||||||
|
evtdata = pickle.load(f)
|
||||||
|
evtDict[evtname] = evtdata
|
||||||
|
|
||||||
|
|
||||||
|
'''======4.1 事件轨迹子图保存 ======================'''
|
||||||
|
error_event = []
|
||||||
|
for evtname, event in evtDict.items():
|
||||||
|
pairpath = os.path.join(subimgPath, f"{evtname}")
|
||||||
|
if not os.path.exists(pairpath):
|
||||||
|
os.makedirs(pairpath)
|
||||||
|
try:
|
||||||
|
save_event_subimg(event, pairpath)
|
||||||
|
except Exception as e:
|
||||||
|
error_event.append(evtname)
|
||||||
|
|
||||||
|
img_path = os.path.join(imagePath, f"{evtname}")
|
||||||
|
if not os.path.exists(img_path):
|
||||||
|
os.makedirs(img_path)
|
||||||
|
try:
|
||||||
|
plot_save_image(event, img_path)
|
||||||
|
except Exception as e:
|
||||||
|
error_event.append(evtname)
|
||||||
|
|
||||||
|
|
||||||
|
errfile = os.path.join(subimgPath, f'error_event.txt')
|
||||||
|
with open(errfile, 'w', encoding='utf-8') as f:
|
||||||
|
for line in error_event:
|
||||||
|
f.write(line + '\n')
|
||||||
|
|
||||||
|
|
||||||
|
'''======4.2 barcode 标准图像保存 =================='''
|
||||||
|
# for stdbcd in barcodes:
|
||||||
|
# stdImgpath = stdDict[stdbcd]["imgpaths"]
|
||||||
|
# pstdpath = os.path.join(subimgPath, f"{stdbcd}")
|
||||||
|
# if not os.path.exists(pstdpath):
|
||||||
|
# os.makedirs(pstdpath)
|
||||||
|
# ii = 1
|
||||||
|
# for filepath in stdImgpath:
|
||||||
|
# stdpath = os.path.join(pstdpath, f"{stdbcd}_{ii}.png")
|
||||||
|
# shutil.copy2(filepath, stdpath)
|
||||||
|
# ii += 1
|
||||||
|
|
||||||
|
'''======5 构造 3 个事件对: 扫 A 放 A, 扫 A 放 B, 合并 ===================='''
|
||||||
|
AA_list = [(evtname, barcode, "same") for evtname, barcode in evtList]
|
||||||
|
AB_list = []
|
||||||
|
for evtname, barcode in evtList:
|
||||||
|
dset = list(barcodes.symmetric_difference(set([barcode])))
|
||||||
|
if len(dset):
|
||||||
|
idx = random.randint(0, len(dset)-1)
|
||||||
|
AB_list.append((evtname, dset[idx], "diff"))
|
||||||
|
|
||||||
|
mergePairs = AA_list + AB_list
|
||||||
|
|
||||||
|
'''======6 计算事件、标准特征集相似度 =================='''
|
||||||
|
rltdata = []
|
||||||
|
for i in range(len(mergePairs)):
|
||||||
|
evtname, stdbcd, label = mergePairs[i]
|
||||||
|
event = evtDict[evtname]
|
||||||
|
|
||||||
|
##============================================ float32
|
||||||
|
stdfeat = stdDict[stdbcd]["feats_ft32"]
|
||||||
|
evtfeat = event.feats_compose
|
||||||
|
|
||||||
|
if len(evtfeat)==0: continue
|
||||||
|
|
||||||
|
matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
|
||||||
|
matrix[matrix < 0] = 0
|
||||||
|
|
||||||
|
|
||||||
|
simi_mean = np.mean(matrix)
|
||||||
|
simi_max = np.max(matrix)
|
||||||
|
stdfeatm = np.mean(stdfeat, axis=0, keepdims=True)
|
||||||
|
evtfeatm = np.mean(evtfeat, axis=0, keepdims=True)
|
||||||
|
simi_mfeat = 1- np.maximum(0.0, cdist(stdfeatm, evtfeatm, 'cosine'))
|
||||||
|
rltdata.append((label, stdbcd, evtname, simi_mean, simi_max, simi_mfeat[0,0]))
|
||||||
|
|
||||||
|
'''================ float32、16、int8 精度比较与存储 ============='''
|
||||||
|
# data_precision_compare(stdfeat, evtfeat, mergePairs[i], save=True)
|
||||||
|
|
||||||
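Step 6 above reduces each (event, barcode) pair to three numbers: the mean and the max of the cosine-similarity matrix, plus the similarity of the two mean feature vectors. A compact sketch of that reduction:

    import numpy as np
    from scipy.spatial.distance import cdist

    def pair_similarity(stdfeat, evtfeat):
        """Return (simi_mean, simi_max, simi_mfeat) for one standard-set / event pair."""
        matrix = 1 - cdist(stdfeat, evtfeat, 'cosine')
        matrix[matrix < 0] = 0                          # clip negative similarities, as in the loop above
        stdm = stdfeat.mean(axis=0, keepdims=True)
        evtm = evtfeat.mean(axis=0, keepdims=True)
        simi_mfeat = 1 - max(0.0, cdist(stdm, evtm, 'cosine')[0, 0])
        return matrix.mean(), matrix.max(), simi_mfeat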
print("func: one2one_eval(), have finished!")
|
print("func: one2one_eval(), have finished!")
|
||||||
|
|
||||||
|
return rltdata
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def compute_precise_recall(pickpath):
|
def compute_precise_recall(rltdata):
|
||||||
|
|
||||||
pickfile = os.path.basename(pickpath)
|
|
||||||
file, ext = os.path.splitext(pickfile)
|
|
||||||
|
|
||||||
if ext != '.pickle': return
|
|
||||||
if file.find('ft16') < 0: return
|
|
||||||
|
|
||||||
with open(pickpath, 'rb') as f:
|
|
||||||
results = pickle.load(f)
|
|
||||||
|
|
||||||
Same, Cross = [], []
|
Same, Cross = [], []
|
||||||
for label, stdbcd, evt, simi_mean, simi_max, simi_mft in results:
|
for label, stdbcd, evtname, simi_mean, simi_max, simi_mft in rltdata:
|
||||||
if label == "same":
|
if label == "same":
|
||||||
Same.append(simi_mean)
|
Same.append(simi_mean)
|
||||||
if label == "diff":
|
if label == "diff":
|
||||||
Cross.append(simi_mean)
|
Cross.append(simi_mean)
|
||||||
|
|
||||||
|
|
||||||
Same = np.array(Same)
|
Same = np.array(Same)
|
||||||
Cross = np.array(Cross)
|
Cross = np.array(Cross)
|
||||||
TPFN = len(Same)
|
TPFN = len(Same)
|
||||||
@ -480,115 +570,135 @@ def compute_precise_recall(pickpath):
|
|||||||
ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
|
ax.set_xlabel(f"Same Num: {TPFN}, Cross Num: {TNFP}")
|
||||||
ax.legend()
|
ax.legend()
|
||||||
plt.show()
|
plt.show()
|
||||||
plt.savefig(f'./result/{file}_pr.png') # svg, png, pdf
|
|
||||||
|
rltpath = os.path.join(similPath, 'pr.png')
|
||||||
|
plt.savefig(rltpath) # svg, png, pdf
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
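compute_precise_recall() splits the per-pair mean similarities into a Same population ("same" pairs) and a Cross population ("diff" pairs) and sweeps a decision threshold. A minimal sketch of that sweep; the 0.1-1.0 threshold grid is an assumption:

    import numpy as np

    def precision_recall_curve(Same, Cross, thresholds=np.linspace(0.1, 1.0, 91)):
        """Same / Cross: similarity lists for matching and mismatched pairs."""
        Same, Cross = np.asarray(Same), np.asarray(Cross)
        precision, recall = [], []
        for th in thresholds:
            TP = (Same >= th).sum()                  # accepted "same" pairs
            FP = (Cross >= th).sum()                 # wrongly accepted "diff" pairs
            precision.append(TP / (TP + FP + 1e-6))
            recall.append(TP / (len(Same) + 1e-6))   # denominator: all positives, as in one2one_pr below
        return np.array(precision), np.array(recall)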
def gen_eventdict(eventDatePath, saveimg=True):
|
def gen_eventdict(sourcePath, saveimg=True):
|
||||||
eventList = []
|
eventList = []
|
||||||
# k = 0
|
errEvents = []
|
||||||
for datePath in eventDatePath:
|
k = 0
|
||||||
for eventName in os.listdir(datePath):
|
for source_path in sourcePath:
|
||||||
|
bname = os.path.basename(source_path)
|
||||||
|
|
||||||
|
pickpath = os.path.join(eventDataPath, f"{bname}.pickle")
|
||||||
|
if os.path.isfile(pickpath): continue
|
||||||
|
|
||||||
|
# if bname != "20241129-100321-a9dae9e3-7db5-4e31-959c-d7dfc228923e_6972636670213":
|
||||||
|
# continue
|
||||||
|
|
||||||
pickpath = os.path.join(eventFeatPath, f"{eventName}.pickle")
|
|
||||||
if os.path.isfile(pickpath):
|
|
||||||
continue
|
# eventDict = creat_shopping_event(eventPath)
|
||||||
eventPath = os.path.join(datePath, eventName)
|
# if eventDict:
|
||||||
|
# eventList.append(eventDict)
|
||||||
|
# with open(pickpath, 'wb') as f:
|
||||||
|
# pickle.dump(eventDict, f)
|
||||||
|
# print(f"Event: {eventName}, have saved!")
|
||||||
|
|
||||||
eventDict = creat_shopping_event(eventPath)
|
# if saveimg and eventDict:
|
||||||
if eventDict:
|
# basename = os.path.basename(eventDict['filepath'])
|
||||||
eventList.append(eventDict)
|
# savepath = os.path.join(subimgPath, basename)
|
||||||
with open(pickpath, 'wb') as f:
|
# if not os.path.exists(savepath):
|
||||||
pickle.dump(eventDict, f)
|
# os.makedirs(savepath)
|
||||||
print(f"Event: {eventName}, have saved!")
|
# save_event_subimg(eventDict, savepath)
|
||||||
|
|
||||||
|
try:
|
||||||
|
event = Event(source_path)
|
||||||
|
eventList.append(event)
|
||||||
|
with open(pickpath, 'wb') as f:
|
||||||
|
pickle.dump(event, f)
|
||||||
|
print(bname)
|
||||||
|
except Exception as e:
|
||||||
|
errEvents.append(source_path)
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
# k += 1
|
||||||
|
# if k==10:
|
||||||
|
# break
|
||||||
|
|
||||||
# k += 1
|
errfile = os.path.join(eventDataPath, f'error_events.txt')
|
||||||
# if k==1:
|
with open(errfile, 'w', encoding='utf-8') as f:
|
||||||
# break
|
for line in errEvents:
|
||||||
|
f.write(line + '\n')
|
||||||
## 保存轨迹中 boxes 子图
|
|
||||||
if not saveimg:
|
|
||||||
return
|
|
||||||
for event in eventList:
|
|
||||||
basename = os.path.basename(event['filepath'])
|
|
||||||
savepath = os.path.join(subimgPath, basename)
|
|
||||||
if not os.path.exists(savepath):
|
|
||||||
os.makedirs(savepath)
|
|
||||||
save_event_subimg(event, savepath)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def test_one2one():
|
def test_one2one():
|
||||||
eventDatePath = [r'\\192.168.1.28\share\测试_202406\1101\images',
|
bcdList, event_spath = [], []
|
||||||
# r'\\192.168.1.28\share\测试_202406\0910\images',
|
for evtpath in eventSourcePath:
|
||||||
# r'\\192.168.1.28\share\测试_202406\0723\0723_1',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0723\0723_2',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0723\0723_3',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0722\0722_01',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0722\0722_02'
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0719\719_3',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0716\0716_1',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0716\0716_2',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0716\0716_3',
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0712\0712_1', # 无帧图像
|
|
||||||
# r'\\192.168.1.28\share\测试_202406\0712\0712_2', # 无帧图像
|
|
||||||
]
|
|
||||||
bcdList = []
|
|
||||||
for evtpath in eventDatePath:
|
|
||||||
for evtname in os.listdir(evtpath):
|
for evtname in os.listdir(evtpath):
|
||||||
evt = evtname.split('_')
|
evt = evtname.split('_')
|
||||||
|
dirpath = os.path.join(evtpath, evtname)
|
||||||
|
if os.path.isfile(dirpath): continue
|
||||||
if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
if len(evt)>=2 and evt[-1].isdigit() and len(evt[-1])>=10:
|
||||||
bcdList.append(evt[-1])
|
bcdList.append(evt[-1])
|
||||||
|
event_spath.append(os.path.join(evtpath, evtname))
|
||||||
|
|
||||||
bcdSet = set(bcdList)
|
bcdSet = set(bcdList)
|
||||||
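test_one2one() keeps only event folders whose name ends in a numeric token of at least 10 digits and treats that token as the barcode. The rule, written out with illustrative names:

    def barcode_from_event_name(evtname):
        """Return the trailing barcode of an event folder name, or None if there is none."""
        parts = evtname.split('_')
        if len(parts) >= 2 and parts[-1].isdigit() and len(parts[-1]) >= 10:
            return parts[-1]
        return None

    # barcode_from_event_name("20241121-144855-dce94b09_6924743915848")  -> "6924743915848"
    # barcode_from_event_name("20241202-150000-no-barcode")              -> None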
|
'''==== 1. 生成标准特征集, 只需运行一次, 在 genfeats.py 中实现 ==========='''
|
||||||
|
# gen_bcd_features(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
|
||||||
|
|
||||||
|
|
||||||
model = model_init(conf)
|
|
||||||
|
|
||||||
'''==== 1. 生成标准特征集, 只需运行一次 ==============='''
|
|
||||||
genfeatures(model, stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
|
|
||||||
print("stdFeats have generated and saved!")
|
print("stdFeats have generated and saved!")
|
||||||
|
|
||||||
|
|
||||||
'''==== 2. 生成事件字典, 只需运行一次 ==============='''
|
'''==== 2. 生成事件字典, 只需运行一次 ==============='''
|
||||||
|
gen_eventdict(event_spath)
|
||||||
gen_eventdict(eventDatePath)
|
|
||||||
print("eventList have generated and saved!")
|
print("eventList have generated and saved!")
|
||||||
|
|
||||||
|
|
||||||
'''==== 3. 1:1性能评估 ==============='''
|
'''==== 3. 1:1性能评估 ==============='''
|
||||||
one2one_eval(resultPath)
|
rltdata = one2one_simi()
|
||||||
for filename in os.listdir(resultPath):
|
compute_precise_recall(rltdata)
|
||||||
if filename.find('.pickle') < 0: continue
|
|
||||||
if filename.find('0911') < 0: continue
|
|
||||||
pickpath = os.path.join(resultPath, filename)
|
|
||||||
compute_precise_recall(pickpath)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
'''
|
'''
|
||||||
共6个地址:
|
共8个地址:
|
||||||
(1) stdSamplePath: 用于生成比对标准特征集的原始图像地址
|
(1) stdSamplePath: 用于生成比对标准特征集的原始图像地址
|
||||||
(2) stdBarcodePath: 比对标准特征集原始图像地址的pickle文件存储,{barcode: [imgpath1, imgpath1, ...]}
|
(2) stdBarcodePath: 比对标准特征集原始图像地址的pickle文件存储,{barcode: [imgpath1, imgpath1, ...]}
|
||||||
(3) stdFeaturePath: 比对标准特征集特征存储地址
|
(3) stdFeaturePath: 比对标准特征集特征存储地址
|
||||||
(4) eventFeatPath: 用于1:1比对的购物事件特征存储地址、对应子图存储地址
|
(4) eventSourcePath: 事件地址
|
||||||
(5) subimgPath: 1:1比对购物事件轨迹、标准barcode所对应的 subimgs 存储地址
|
(5) resultPath: 结果存储地址
|
||||||
(6) resultPath: 1:1比对结果存储地址
|
(6) eventDataPath: 用于1:1比对的购物事件特征存储地址、对应子图存储地址
|
||||||
|
(7) subimgPath: 1:1比对购物事件轨迹、标准barcode所对应的 subimgs 存储地址
|
||||||
|
(8) similPath: 1:1比对结果存储地址(事件级)
|
||||||
'''
|
'''
|
||||||
|
# stdSamplePath = r"\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_500_1979_已清洗"
|
||||||
stdSamplePath = r"\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_500_1979_已清洗"
|
# stdBarcodePath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192"
|
||||||
stdBarcodePath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192"
|
# stdFeaturePath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32"
|
||||||
stdFeaturePath = r"\\192.168.1.28\share\测试_202406\contrast\std_features_ft32"
|
# eventDataPath = r"\\192.168.1.28\share\测试_202406\contrast\events"
|
||||||
eventFeatPath = r"\\192.168.1.28\share\测试_202406\contrast\events"
|
# subimgPath = r'\\192.168.1.28\share\测试_202406\contrast\subimgs'
|
||||||
subimgPath = r'\\192.168.1.28\share\测试_202406\contrast\subimgs'
|
# similPath = r"D:\DetectTracking\contrast\result\pickle"
|
||||||
resultPath = r"D:\DetectTracking\contrast\result\pickle"
|
# eventSourcePath = [r'\\192.168.1.28\share\测试_202406\1101\images']
|
||||||
if not os.path.exists(resultPath):
|
|
||||||
os.makedirs(resultPath)
|
stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
|
||||||
|
stdBarcodePath = r"D:\exhibition\dataset\bcdpath"
|
||||||
|
stdFeaturePath = r"D:\exhibition\dataset\feats"
|
||||||
|
resultPath = r"D:\exhibition\result\events"
|
||||||
|
# eventSourcePath = [r'D:\exhibition\images\20241202']
|
||||||
|
# eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"]
|
||||||
|
eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1126_展厅模型v801测试"]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
'''定义当前事件存储地址及生成相应文件件'''
|
||||||
|
eventDataPath = os.path.join(resultPath, "1126", "evtobjs")
|
||||||
|
subimgPath = os.path.join(resultPath, "1126", "subimgs")
|
||||||
|
imagePath = os.path.join(resultPath, "1126", "image")
|
||||||
|
similPath = os.path.join(resultPath, "1126", "simidata")
|
||||||
|
|
||||||
|
if not os.path.exists(eventDataPath):
|
||||||
|
os.makedirs(eventDataPath)
|
||||||
|
if not os.path.exists(subimgPath):
|
||||||
|
os.makedirs(subimgPath)
|
||||||
|
if not os.path.exists(imagePath):
|
||||||
|
os.makedirs(imagePath)
|
||||||
|
if not os.path.exists(similPath):
|
||||||
|
os.makedirs(similPath)
|
||||||
|
|
||||||
test_one2one()
|
test_one2one()
|
||||||
|
|
||||||
|
@ -106,7 +106,9 @@ def test_compare():
|
|||||||
|
|
||||||
def one2one_pr(paths):
|
def one2one_pr(paths):
|
||||||
paths = Path(paths)
|
paths = Path(paths)
|
||||||
evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2]
|
|
||||||
|
# evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2]
|
||||||
|
evtpaths = [p for p in paths.iterdir() if p.is_dir()]
|
||||||
|
|
||||||
events, similars = [], []
|
events, similars = [], []
|
||||||
|
|
||||||
@ -120,14 +122,19 @@ def one2one_pr(paths):
|
|||||||
##===================================== 应用于1:n
|
##===================================== 应用于1:n
|
||||||
tpevents, fnevents, fpevents, tnevents = [], [], [], []
|
tpevents, fnevents, fpevents, tnevents = [], [], [], []
|
||||||
tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
|
tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
|
||||||
|
other_event, other_simi = [], []
|
||||||
|
|
||||||
|
##===================================== barcodes总数、比对错误事件
|
||||||
|
bcdList, one2onePath = [], []
|
||||||
for path in evtpaths:
|
for path in evtpaths:
|
||||||
barcode = path.stem.split('_')[-1]
|
barcode = path.stem.split('_')[-1]
|
||||||
datapath = path.joinpath('process.data')
|
datapath = path.joinpath('process.data')
|
||||||
|
|
||||||
if not barcode.isdigit() or len(barcode)<10: continue
|
if not barcode.isdigit() or len(barcode)<10: continue
|
||||||
if not datapath.is_file(): continue
|
if not datapath.is_file(): continue
|
||||||
|
|
||||||
|
bcdList.append(barcode)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
SimiDict = read_similar(datapath)
|
SimiDict = read_similar(datapath)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@ -150,13 +157,17 @@ def one2one_pr(paths):
|
|||||||
|
|
||||||
one2oneAA.extend(simAA)
|
one2oneAA.extend(simAA)
|
||||||
one2oneAB.extend(simAB)
|
one2oneAB.extend(simAB)
|
||||||
|
one2onePath.append(path.stem)
|
||||||
|
|
||||||
##===================================== 以下应用适用于展厅 1:N
|
##===================================== 以下应用适用于展厅 1:N
|
||||||
max_idx = similars.index(max(similars))
|
max_idx = similars.index(max(similars))
|
||||||
max_sim = similars[max_idx]
|
max_sim = similars[max_idx]
|
||||||
# max_bcd = barcodes[max_idx]
|
# max_bcd = barcodes[max_idx]
|
||||||
|
|
||||||
|
if path.stem.find('100321')>0:
|
||||||
|
print("hhh")
|
||||||
|
|
||||||
|
|
||||||
for i in range(len(one2one)):
|
for i in range(len(one2one)):
|
||||||
bcd, simi = barcodes[i], similars[i]
|
bcd, simi = barcodes[i], similars[i]
|
||||||
if bcd==barcode and simi==max_sim:
|
if bcd==barcode and simi==max_sim:
|
||||||
@ -172,7 +183,7 @@ def one2one_pr(paths):
|
|||||||
fp_simi.append(simi)
|
fp_simi.append(simi)
|
||||||
fp_events.append(path.stem)
|
fp_events.append(path.stem)
|
||||||
|
|
||||||
|
|
||||||
##===================================== 以下应用适用1:n
|
##===================================== 以下应用适用1:n
|
||||||
events, evt_barcodes, evt_similars, evt_types = [], [], [], []
|
events, evt_barcodes, evt_similars, evt_types = [], [], [], []
|
||||||
for dt in one2n:
|
for dt in one2n:
|
||||||
@ -197,9 +208,13 @@ def one2one_pr(paths):
|
|||||||
elif bcd!=barcode and simi!=maxsim:
|
elif bcd!=barcode and simi!=maxsim:
|
||||||
tnsimi.append(simi)
|
tnsimi.append(simi)
|
||||||
tnevents.append(path.stem)
|
tnevents.append(path.stem)
|
||||||
else:
|
elif bcd!=barcode and simi==maxsim:
|
||||||
fpsimi.append(simi)
|
fpsimi.append(simi)
|
||||||
fpevents.append(path.stem)
|
fpevents.append(path.stem)
|
||||||
|
else:
|
||||||
|
other_simi.append(simi)
|
||||||
|
other_event.append(path.stem)
|
||||||
|
|
||||||
|
|
||||||
'''命名规则:
|
'''命名规则:
|
||||||
1:1 1:n 1:N
|
1:1 1:n 1:N
|
||||||
@ -228,9 +243,12 @@ def one2one_pr(paths):
|
|||||||
FN_ = sum(np.array(one2oneAA) < th)
|
FN_ = sum(np.array(one2oneAA) < th)
|
||||||
TN_ = sum(np.array(one2oneAB) < th)
|
TN_ = sum(np.array(one2oneAB) < th)
|
||||||
PPrecise_.append(TP_/(TP_+FP_+1e-6))
|
PPrecise_.append(TP_/(TP_+FP_+1e-6))
|
||||||
PRecall_.append(TP_/(TP_+FN_+1e-6))
|
# PRecall_.append(TP_/(TP_+FN_+1e-6))
|
||||||
|
PRecall_.append(TP_/(len(one2oneAA)+1e-6))
|
||||||
|
|
||||||
NPrecise_.append(TN_/(TN_+FN_+1e-6))
|
NPrecise_.append(TN_/(TN_+FN_+1e-6))
|
||||||
NRecall_.append(TN_/(TN_+FP_+1e-6))
|
# NRecall_.append(TN_/(TN_+FP_+1e-6))
|
||||||
|
NRecall_.append(TN_/(len(one2oneAB)+1e-6))
|
||||||
|
|
||||||
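The commented-out lines keep the old recall definition TP/(TP+FN); the new code divides by the full positive population instead. The two are numerically identical whenever every "same" similarity falls on one side of the threshold, since TP + FN then equals len(one2oneAA); the explicit denominator just makes that intent obvious. A quick check:

    import numpy as np

    one2oneAA = np.array([0.82, 0.64, 0.91, 0.47])    # toy "same" similarities
    th = 0.6
    TP_ = (one2oneAA >= th).sum()                     # 3
    FN_ = (one2oneAA < th).sum()                      # 1
    assert TP_ / (TP_ + FN_) == TP_ / len(one2oneAA)  # both denominators cover every "same" pair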
'''============================= 1:n'''
|
'''============================= 1:n'''
|
||||||
TP = sum(np.array(tpsimi) >= th)
|
TP = sum(np.array(tpsimi) >= th)
|
||||||
@ -238,9 +256,12 @@ def one2one_pr(paths):
|
|||||||
FN = sum(np.array(fnsimi) < th)
|
FN = sum(np.array(fnsimi) < th)
|
||||||
TN = sum(np.array(tnsimi) < th)
|
TN = sum(np.array(tnsimi) < th)
|
||||||
PPrecise.append(TP/(TP+FP+1e-6))
|
PPrecise.append(TP/(TP+FP+1e-6))
|
||||||
PRecall.append(TP/(TP+FN+1e-6))
|
# PRecall.append(TP/(TP+FN+1e-6))
|
||||||
|
PRecall.append(TP/(len(tpsimi)+len(fnsimi)+1e-6))
|
||||||
|
|
||||||
NPrecise.append(TN/(TN+FN+1e-6))
|
NPrecise.append(TN/(TN+FN+1e-6))
|
||||||
NRecall.append(TN/(TN+FP+1e-6))
|
# NRecall.append(TN/(TN+FP+1e-6))
|
||||||
|
NRecall.append(TN/(len(tnsimi)+len(fpsimi)+1e-6))
|
||||||
|
|
||||||
|
|
||||||
'''============================= 1:N 展厅'''
|
'''============================= 1:N 展厅'''
|
||||||
@ -249,9 +270,12 @@ def one2one_pr(paths):
|
|||||||
FNX = sum(np.array(fn_simi) < th)
|
FNX = sum(np.array(fn_simi) < th)
|
||||||
TNX = sum(np.array(tn_simi) < th)
|
TNX = sum(np.array(tn_simi) < th)
|
||||||
PPreciseX.append(TPX/(TPX+FPX+1e-6))
|
PPreciseX.append(TPX/(TPX+FPX+1e-6))
|
||||||
PRecallX.append(TPX/(TPX+FNX+1e-6))
|
# PRecallX.append(TPX/(TPX+FNX+1e-6))
|
||||||
|
PRecallX.append(TPX/(len(tp_simi)+len(fn_simi)+1e-6))
|
||||||
|
|
||||||
NPreciseX.append(TNX/(TNX+FNX+1e-6))
|
NPreciseX.append(TNX/(TNX+FNX+1e-6))
|
||||||
NRecallX.append(TNX/(TNX+FPX+1e-6))
|
# NRecallX.append(TNX/(TNX+FPX+1e-6))
|
||||||
|
NRecallX.append(TNX/(len(tn_simi)+len(fp_simi)+1e-6))
|
||||||
|
|
||||||
'''============================= 1:1 曲线'''
|
'''============================= 1:1 曲线'''
|
||||||
fig, ax = plt.subplots()
|
fig, ax = plt.subplots()
|
||||||
@ -262,8 +286,8 @@ def one2one_pr(paths):
|
|||||||
ax.set_xlim([0, 1])
|
ax.set_xlim([0, 1])
|
||||||
ax.set_ylim([0, 1])
|
ax.set_ylim([0, 1])
|
||||||
ax.grid(True)
|
ax.grid(True)
|
||||||
ax.set_title('Precise & Recall')
|
ax.set_title('1:1 Precise & Recall')
|
||||||
ax.set_xlabel(f"Num: {len(evtpaths)}")
|
ax.set_xlabel(f"Event Num: {len(one2oneAA)}")
|
||||||
ax.legend()
|
ax.legend()
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
@ -286,8 +310,8 @@ def one2one_pr(paths):
|
|||||||
ax.set_xlim([0, 1])
|
ax.set_xlim([0, 1])
|
||||||
ax.set_ylim([0, 1])
|
ax.set_ylim([0, 1])
|
||||||
ax.grid(True)
|
ax.grid(True)
|
||||||
ax.set_title('Precise & Recall')
|
ax.set_title('1:n Precise & Recall')
|
||||||
ax.set_xlabel(f"Num: {len(evtpaths)}")
|
ax.set_xlabel(f"Event Num: {len(one2oneAA)}")
|
||||||
ax.legend()
|
ax.legend()
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
@ -317,8 +341,8 @@ def one2one_pr(paths):
|
|||||||
ax.set_xlim([0, 1])
|
ax.set_xlim([0, 1])
|
||||||
ax.set_ylim([0, 1])
|
ax.set_ylim([0, 1])
|
||||||
ax.grid(True)
|
ax.grid(True)
|
||||||
ax.set_title('Precise & Recall')
|
ax.set_title('1:N Precise & Recall')
|
||||||
ax.set_xlabel(f"Num: {len(evtpaths)}")
|
ax.set_xlabel(f"Event Num: {len(one2oneAA)}")
|
||||||
ax.legend()
|
ax.legend()
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
@ -338,16 +362,23 @@ def one2one_pr(paths):
|
|||||||
axes[1, 1].set_title('FN')
|
axes[1, 1].set_title('FN')
|
||||||
plt.show()
|
plt.show()
|
||||||
|
|
||||||
|
# bcdSet = set(bcdList)
|
||||||
|
# one2nErrFile = str(paths.joinpath("one_2_Small_n_Error.txt"))
|
||||||
|
# with open(one2nErrFile, "w") as file:
|
||||||
|
# for item in fnevents:
|
||||||
|
# file.write(item + "\n")
|
||||||
|
|
||||||
|
# one2NErrFile = str(paths.joinpath("one_2_Big_N_Error.txt"))
|
||||||
|
# with open(one2NErrFile, "w") as file:
|
||||||
|
# for item in fn_events:
|
||||||
|
# file.write(item + "\n")
|
||||||
|
|
||||||
print('Done!')
|
print('Done!')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A"
|
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"
|
||||||
one2one_pr(evtpaths)
|
one2one_pr(evtpaths)
|
||||||
|
|
||||||
|
|
||||||
|
BIN  contrast/utils/__pycache__/event.cpython-39.pyc  (new file; binary not shown)
179  contrast/utils/event.py  (new file)
@@ -0,0 +1,179 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
"""
|
||||||
|
Created on Tue Nov 26 17:35:05 2024
|
||||||
|
|
||||||
|
@author: ym
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import numpy as np
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
import sys
|
||||||
|
sys.path.append(r"D:\DetectTracking")
|
||||||
|
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
|
||||||
|
|
||||||
|
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
|
||||||
|
VID_FORMAT = ['.mp4', '.avi']
|
||||||
|
|
||||||
|
class Event:
|
||||||
|
def __init__(self, eventpath, stype="data"):
|
||||||
|
'''stype: str, 'video', 'image', 'data', '''
|
||||||
|
|
||||||
|
self.eventpath = eventpath
|
||||||
|
self.evtname = str(Path(eventpath).stem)
|
||||||
|
self.barcode = ''
|
||||||
|
self.evtType = ''
|
||||||
|
|
||||||
|
'''=========== path of image and video =========== '''
|
||||||
|
self.back_videopath = ''
|
||||||
|
self.front_videopath = ''
|
||||||
|
self.back_imgpaths = []
|
||||||
|
self.front_imgpaths = []
|
||||||
|
|
||||||
|
'''=========== process.data ==============================='''
|
||||||
|
self.one2one = None
|
||||||
|
self.one2n = None
|
||||||
|
|
||||||
|
'''=========== 0/1_track.data ============================='''
|
||||||
|
self.back_yolobboxes = np.empty((0, 6), dtype=np.float64)
|
||||||
|
self.back_yolofeats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.back_trackerboxes = np.empty((0, 9), dtype=np.float64)
|
||||||
|
self.back_trackerfeats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.back_trackingboxes = np.empty((0, 9), dtype=np.float64)
|
||||||
|
self.back_trackingfeats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
|
||||||
|
self.front_yolobboxes = np.empty((0, 6), dtype=np.float64)
|
||||||
|
self.front_yolofeats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.front_trackerboxes = np.empty((0, 9), dtype=np.float64)
|
||||||
|
self.front_trackerfeats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.front_trackingboxes = np.empty((0, 9), dtype=np.float64)
|
||||||
|
self.front_trackingfeats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
|
||||||
|
'''=========== 0/1_tracking_output.data ==================='''
|
||||||
|
self.back_boxes = np.empty((0, 9), dtype=np.float64)
|
||||||
|
self.front_boxes = np.empty((0, 9), dtype=np.float64)
|
||||||
|
self.back_feats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.front_feats = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.feats_compose = np.empty((0, 256), dtype=np.float64)
|
||||||
|
self.feats_select = np.empty((0, 256), dtype=np.float64)
|
||||||
|
|
||||||
|
if stype=="data":
|
||||||
|
self.from_datafile(eventpath)
|
||||||
|
|
||||||
|
if stype=="video":
|
||||||
|
self.from_video(eventpath)
|
||||||
|
|
||||||
|
if stype=="image":
|
||||||
|
self.from_image(eventpath)
|
||||||
|
|
||||||
|
def from_datafile(self, eventpath):
|
||||||
|
evtList = self.evtname.split('_')
|
||||||
|
if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
|
||||||
|
self.barcode = evtList[-1]
|
||||||
|
if len(evtList)==3 and evtList[-1]== evtList[-2]:
|
||||||
|
self.evtType = 'input'
|
||||||
|
else:
|
||||||
|
self.evtType = 'other'
|
||||||
|
|
||||||
|
'''================ path of image ============='''
|
||||||
|
frontImgs, frontFid = [], []
|
||||||
|
backImgs, backFid = [], []
|
||||||
|
for imgname in os.listdir(eventpath):
|
||||||
|
name, ext = os.path.splitext(imgname)
|
||||||
|
if ext not in IMG_FORMAT or name.find('frameId') < 0: continue
|
||||||
|
if len(name.split('_')) < 4 or not name.split('_')[3].isdigit(): continue
|
||||||
|
|
||||||
|
CamerType = name.split('_')[0]
|
||||||
|
frameId = int(name.split('_')[3])
|
||||||
|
imgpath = os.path.join(eventpath, imgname)
|
||||||
|
if CamerType == '0':
|
||||||
|
backImgs.append(imgpath)
|
||||||
|
backFid.append(frameId)
|
||||||
|
if CamerType == '1':
|
||||||
|
frontImgs.append(imgpath)
|
||||||
|
frontFid.append(frameId)
|
||||||
|
## 生成依据帧 ID 排序的前后摄图像地址列表
|
||||||
|
frontIdx = np.argsort(np.array(frontFid))
|
||||||
|
backIdx = np.argsort(np.array(backFid))
|
||||||
|
self.front_imgpaths = [frontImgs[i] for i in frontIdx]
|
||||||
|
self.back_imgpaths = [backImgs[i] for i in backIdx]
|
||||||
|
|
||||||
|
|
||||||
|
'''================ path of video ============='''
|
||||||
|
for vidname in os.listdir(eventpath):
|
||||||
|
name, ext = os.path.splitext(vidname)
|
||||||
|
if ext not in VID_FORMAT: continue
|
||||||
|
vidpath = os.path.join(eventpath, vidname)
|
||||||
|
|
||||||
|
CamerType = name.split('_')[0]
|
||||||
|
if CamerType == '0':
|
||||||
|
self.back_videopath = vidpath
|
||||||
|
if CamerType == '1':
|
||||||
|
self.front_videopath = vidpath
|
||||||
|
|
||||||
|
'''================ process.data ============='''
|
||||||
|
procpath = Path(eventpath).joinpath('process.data')
|
||||||
|
if procpath.is_file():
|
||||||
|
SimiDict = read_similar(procpath)
|
||||||
|
self.one2one = SimiDict['one2one']
|
||||||
|
self.one2n = SimiDict['one2n']
|
||||||
|
|
||||||
|
|
||||||
|
'''=========== 0/1_track.data & 0/1_tracking_output.data ======='''
|
||||||
|
for dataname in os.listdir(eventpath):
|
||||||
|
datapath = os.path.join(eventpath, dataname)
|
||||||
|
if not os.path.isfile(datapath): continue
|
||||||
|
CamerType = dataname.split('_')[0]
|
||||||
|
|
||||||
|
'''========== 0/1_track.data =========='''
|
||||||
|
if dataname.find("_track.data")>0:
|
||||||
|
bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(datapath)
|
||||||
|
if CamerType == '0':
|
||||||
|
self.back_yolobboxes = bboxes
|
||||||
|
self.back_yolofeats = ffeats
|
||||||
|
self.back_trackerboxes = trackerboxes
|
||||||
|
self.back_trackerfeats = tracker_feat_dict
|
||||||
|
self.back_trackingboxes = trackingboxes
|
||||||
|
self.back_trackingfeats = tracking_feat_dict
|
||||||
|
if CamerType == '1':
|
||||||
|
self.front_yolobboxes = bboxes
|
||||||
|
self.front_yolofeats = ffeats
|
||||||
|
self.front_trackerboxes = trackerboxes
|
||||||
|
self.front_trackerfeats = tracker_feat_dict
|
||||||
|
self.front_trackingboxes = trackingboxes
|
||||||
|
self.front_trackingfeats = tracking_feat_dict
|
||||||
|
|
||||||
|
'''========== 0/1_tracking_output.data =========='''
|
||||||
|
if dataname.find("_tracking_output.data")>0:
|
||||||
|
tracking_output_boxes, tracking_output_feats = read_tracking_output(datapath)
|
||||||
|
if CamerType == '0':
|
||||||
|
self.back_boxes = tracking_output_boxes
|
||||||
|
self.back_feats = tracking_output_feats
|
||||||
|
elif CamerType == '1':
|
||||||
|
self.front_boxes = tracking_output_boxes
|
||||||
|
self.front_feats = tracking_output_feats
|
||||||
|
self.select_feats()
|
||||||
|
self.compose_feats()
|
||||||
|
|
||||||
|
|
||||||
|
def compose_feats(self):
|
||||||
|
'''事件的特征集成'''
|
||||||
|
feats_compose = np.empty((0, 256), dtype=np.float64)
|
||||||
|
if len(self.front_feats):
|
||||||
|
feats_compose = np.concatenate((feats_compose, self.front_feats), axis=0)
|
||||||
|
if len(self.back_feats):
|
||||||
|
feats_compose = np.concatenate((feats_compose, self.back_feats), axis=0)
|
||||||
|
self.feats_compose = feats_compose
|
||||||
|
|
||||||
|
def select_feats(self):
|
||||||
|
'''事件的特征选择'''
|
||||||
|
if len(self.front_feats):
|
||||||
|
self.feats_select = self.front_feats
|
||||||
|
else:
|
||||||
|
self.feats_select = self.back_feats
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
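A minimal usage sketch of the new Event class; the event path is illustrative, and the import assumes the repository root is on sys.path (the scripts above import it as utils.event after appending D:\DetectTracking):

    import pickle
    from contrast.utils.event import Event     # or: from utils.event import Event, as in the script above

    eventpath = r"D:\exhibition\images\20241202\20241202-103000-xxxx_6924743915848"   # illustrative
    event = Event(eventpath, stype="data")      # parses images/videos, 0/1_track.data and process.data

    print(event.barcode, event.evtType)
    print(event.front_feats.shape, event.back_feats.shape, event.feats_compose.shape)

    # gen_eventdict() pickles one Event per folder and the 1:1 comparison reloads them:
    with open(f"{event.evtname}.pickle", 'wb') as f:
        pickle.dump(event, f)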
82  pipeline.py
@@ -5,11 +5,19 @@ Created on Sun Sep 29 08:59:21 2024
|
|||||||
@author: ym
|
@author: ym
|
||||||
"""
|
"""
|
||||||
import os
|
import os
|
||||||
|
import sys
|
||||||
import cv2
|
import cv2
|
||||||
import pickle
|
import pickle
|
||||||
|
import argparse
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from track_reid import parse_opt, yolo_resnet_tracker
|
from track_reid import parse_opt
|
||||||
|
from track_reid import yolo_resnet_tracker
|
||||||
|
# FILE = Path(__file__).resolve()
|
||||||
|
# ROOT = FILE.parents[0] # YOLOv5 root directory
|
||||||
|
# if str(ROOT) not in sys.path:
|
||||||
|
# sys.path.append(str(ROOT)) # add ROOT to PATH
|
||||||
|
# ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
|
||||||
|
|
||||||
from tracking.dotrack.dotracks_back import doBackTracks
|
from tracking.dotrack.dotracks_back import doBackTracks
|
||||||
from tracking.dotrack.dotracks_front import doFrontTracks
|
from tracking.dotrack.dotracks_front import doFrontTracks
|
||||||
@ -35,38 +43,27 @@ def get_interbcd_inputenents():
|
|||||||
|
|
||||||
return input_enents
|
return input_enents
|
||||||
|
|
||||||
def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
|
def pipeline(
|
||||||
'''
|
eventpath,
|
||||||
inputs:
|
savepath,
|
||||||
eventpath: 事件文件夹
|
SourceType = "image", # video
|
||||||
stdfeat_path: 标准特征文件地址
|
stdfeat_path = None
|
||||||
outputs:
|
):
|
||||||
|
|
||||||
'''
|
|
||||||
# SourceType = "image" # image
|
|
||||||
# eventpath = r"\\192.168.1.28\share\测试_202406\0918\images1\20240918-110822-1bc3902e-5a8e-4e23-8eca-fb3f02738551_6938314601726"
|
|
||||||
|
|
||||||
savepath = r"D:\contrast\detect"
|
|
||||||
|
|
||||||
opt = parse_opt()
|
|
||||||
optdict = vars(opt)
|
|
||||||
optdict["project"] = savepath
|
|
||||||
|
|
||||||
eventname = os.path.basename(eventpath)
|
|
||||||
# barcode = eventname.split('_')[-1]
|
|
||||||
|
|
||||||
|
|
||||||
if SourceType == "video":
|
if SourceType == "video":
|
||||||
vpaths = get_video_pairs(eventpath)
|
vpaths = get_video_pairs(eventpath)
|
||||||
elif SourceType == "image":
|
elif SourceType == "image":
|
||||||
vpaths = get_image_pairs(eventpath)
|
vpaths = get_image_pairs(eventpath)
|
||||||
|
|
||||||
|
'''======== 函数 yolo_resnet_tracker() 的参数字典 ========'''
|
||||||
|
opt = parse_opt()
|
||||||
|
optdict = vars(opt)
|
||||||
|
optdict["is_save_img"] = True
|
||||||
|
optdict["is_save_video"] = True
|
||||||
|
|
||||||
event_tracks = []
|
event_tracks = []
|
||||||
for vpath in vpaths:
|
for vpath in vpaths:
|
||||||
'''事件结果文件夹'''
|
'''事件结果文件夹'''
|
||||||
save_dir_event = Path(savepath) / Path(eventname)
|
save_dir_event = Path(savepath) / Path(os.path.basename(eventpath))
|
||||||
if isinstance(vpath, list):
|
if isinstance(vpath, list):
|
||||||
save_dir_video = save_dir_event / Path("images")
|
save_dir_video = save_dir_event / Path("images")
|
||||||
else:
|
else:
|
||||||
@ -78,8 +75,7 @@ def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
|
|||||||
'''Yolo + Resnet + Tracker'''
|
'''Yolo + Resnet + Tracker'''
|
||||||
optdict["source"] = vpath
|
optdict["source"] = vpath
|
||||||
optdict["save_dir"] = save_dir_video
|
optdict["save_dir"] = save_dir_video
|
||||||
optdict["is_save_img"] = True
|
|
||||||
optdict["is_save_video"] = True
|
|
||||||
|
|
||||||
tracksdict = yolo_resnet_tracker(**optdict)
|
tracksdict = yolo_resnet_tracker(**optdict)
|
||||||
|
|
||||||
@ -138,6 +134,7 @@ def pipeline(eventpath, stdfeat_path=None, SourceType = "image"):
|
|||||||
|
|
||||||
|
|
||||||
'''前后摄轨迹选择'''
|
'''前后摄轨迹选择'''
|
||||||
|
|
||||||
if stdfeat_path is not None:
|
if stdfeat_path is not None:
|
||||||
with open(stdfeat_path, 'rb') as f:
|
with open(stdfeat_path, 'rb') as f:
|
||||||
featDict = pickle.load(f)
|
featDict = pickle.load(f)
|
||||||
@ -171,22 +168,29 @@ def main_loop():
|
|||||||
stdfeat_path = os.path.join(bcdpath, f"{bcd}.pickle")
|
stdfeat_path = os.path.join(bcdpath, f"{bcd}.pickle")
|
||||||
input_enents.append((event_path, stdfeat_path))
|
input_enents.append((event_path, stdfeat_path))
|
||||||
|
|
||||||
|
parmDict = {}
|
||||||
|
parmDict["SourceType"] = "image"
|
||||||
|
parmDict["savepath"] = r"D:\contrast\detect"
|
||||||
for eventpath, stdfeat_path in input_enents:
|
for eventpath, stdfeat_path in input_enents:
|
||||||
pipeline(eventpath, stdfeat_path, SourceType)
|
parmDict["eventpath"] = eventpath
|
||||||
|
parmDict["stdfeat_path"] = stdfeat_path
|
||||||
|
|
||||||
|
pipeline(**parmDict)
|
||||||
def main():
|
|
||||||
eventpath = r"D:\datasets\ym\exhibition\175836"
|
|
||||||
|
|
||||||
eventpath = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A\20241121-144855-dce94b09-1100-43f1-92e8-33a1b538b159_6924743915848_6924743915848"
|
|
||||||
|
|
||||||
SourceType = 'image'
|
|
||||||
stdfeat_path = None
|
|
||||||
|
|
||||||
pipeline(eventpath, stdfeat_path, SourceType)
|
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
'''
|
||||||
|
函数:pipeline(),遍历事件文件夹,选择类型 image 或 video,
|
||||||
|
'''
|
||||||
|
parmDict = {}
|
||||||
|
parmDict["eventpath"] = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A\20241121-144855-dce94b09-1100-43f1-92e8-33a1b538b159_6924743915848_6924743915848"
|
||||||
|
|
||||||
|
parmDict["savepath"] = r"D:\contrast\detect"
|
||||||
|
parmDict["SourceType"] = "image" # video, image
|
||||||
|
parmDict["stdfeat_path"] = None
|
||||||
|
|
||||||
|
pipeline(**parmDict)
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
main()
|
main()
|
||||||
@ -204,11 +204,6 @@ def state_measure(periods, weights, hands, spath=None):

     state1 = frstate_1[:,2][:, None]
     state11 = frstate_1[:,3][:, None]

@ -428,8 +423,8 @@ def run_tracking(trackboxes, MotionSlice):


 def show_seri():
-    datapath = r"\\192.168.1.28\share\realtime\eventdata\1731316835560"
-    savedir = r"D:\DetectTracking\realtime"
+    datapath = r"\\192.168.1.28\share\个人文件\wqg\realtime\eventdata\1731316835560"
+    savedir = r"D:\DetectTracking\realtime\1"

     imgdir = datapath.split('\\')[-2] + "_" + datapath.split('\\')[-1]
@ -475,7 +470,7 @@ def show_seri():


 def main():
-    runyolo()
+    # runyolo()

     show_seri()

track_reid.py  (156 changed lines)
@ -64,6 +64,9 @@ from contrast.feat_extract.config import config as conf
 from contrast.feat_extract.inference import FeatsInterface
 ReIDEncoder = FeatsInterface(conf)

+IMG_FORMATS = '.bmp', '.dng', '.jpeg', '.jpg', '.mpo', '.png', '.tif', '.tiff', '.webp', '.pfm'  # include image suffixes
+VID_FORMATS = '.asf', '.avi', '.gif', '.m4v', '.mkv', '.mov', '.mp4', '.mpeg', '.mpg', '.ts', '.wmv'  # include video suffixes

 # from tracking.trackers.reid.reid_interface import ReIDInterface
 # from tracking.trackers.reid.config import config as ReIDConfig
 # ReIDEncoder = ReIDInterface(ReIDConfig)
@ -141,6 +144,9 @@ def yolo_resnet_tracker(
         name='exp',  # save results to project/name
         save_dir = '',

+        is_save_img = False,
+        is_save_video = True,

         tracker_yaml = "./tracking/trackers/cfg/botsort.yaml",
         imgsz=(640, 640),  # inference size (height, width)
         conf_thres=0.25,  # confidence threshold
@ -153,18 +159,15 @@ def yolo_resnet_tracker(
         save_csv=False,  # save results in CSV format
         save_conf=False,  # save confidences in --save-txt labels
         save_crop=False,  # save cropped prediction boxes

         nosave=False,  # do not save images/videos
-        is_save_img = False,
-        is_save_video = True,
+        update=False,  # update all models
+        exist_ok=False,  # existing project/name ok, do not increment

         classes=None,  # filter by class: --class 0, or --class 0 2 3
         agnostic_nms=False,  # class-agnostic NMS
         augment=False,  # augmented inference
         visualize=False,  # visualize features
-        update=False,  # update all models
-        exist_ok=False,  # existing project/name ok, do not increment
         line_thickness=3,  # bounding box thickness (pixels)
         hide_labels=False,  # hide labels
         hide_conf=False,  # hide confidences
@ -179,6 +182,8 @@ def yolo_resnet_tracker(
     model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
     stride, names, pt = model.stride, model.names, model.pt
     imgsz = check_img_size(imgsz, s=stride)  # check image size

     # Dataloader
     bs = 1  # batch_size
@ -203,8 +208,8 @@ def yolo_resnet_tracker(

         # Inference
         with dt[1]:
-            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
-            pred = model(im, augment=augment, visualize=visualize)
+            visualize = increment_path(project / Path(path).stem, mkdir=True) if visualize else False
+            pred = model(im, augment=augment, visualize=False)

         # NMS
         with dt[2]:
@ -273,19 +278,27 @@ def yolo_resnet_tracker(
                 annotator.box_label(xyxy, label, color=color)

             '''====== Save results (image and video) ======'''
-            save_path = str(save_dir / Path(path).name)  # with file extension
+            # save_path = str(save_dir / Path(path).name)  # with file extension
             im0 = annotator.result()
             if is_save_img:
-                save_path_img, ext = os.path.splitext(save_path)
+                save_path_img = str(save_dir / Path(path).stem)
                 if dataset.mode == 'image':
                     imgpath = save_path_img + ".png"
                 else:
                     imgpath = save_path_img + f"_{frameId}.png"
                 cv2.imwrite(Path(imgpath), im0)

-            if dataset.mode == 'video' and is_save_video:
-                if vid_path[i] != save_path:  # new video
-                    vid_path[i] = save_path
+            # if dataset.mode == 'video' and is_save_video:
+            if is_save_video:
+                if dataset.mode == 'video':
+                    vdieo_path = str(save_dir / Path(path).stem) + '.mp4'  # with file extension
+                else:
+                    videoname = str(Path(path).stem).split('_')[0] + '.mp4'
+                    vdieo_path = str(save_dir / videoname)

+                if vid_path[i] != vdieo_path:  # new video
+                    vid_path[i] = vdieo_path
                 if isinstance(vid_writer[i], cv2.VideoWriter):
                     vid_writer[i].release()  # release previous video writer
                 if vid_cap:  # video
@ -293,9 +306,9 @@ def yolo_resnet_tracker(
                     w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                     h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                 else:  # stream
-                    fps, w, h = 30, im0.shape[1], im0.shape[0]
-                save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
-                vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                    fps, w, h = 25, im0.shape[1], im0.shape[0]
+                vdieo_path = str(Path(vdieo_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                vid_writer[i] = cv2.VideoWriter(vdieo_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                 vid_writer[i].write(im0)

         # Print time (inference-only)
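As a side note, the writer bookkeeping in the two hunks above follows a simple pattern: one cv2.VideoWriter per output path, re-created whenever the target path changes. A minimal, self-contained sketch (not from this commit; class and method names are mine):

# Sketch only: illustrates the "new video -> new writer" bookkeeping in isolation.
import cv2

class FrameSink:
    def __init__(self):
        self.path = None
        self.writer = None

    def write(self, video_path, frame, fps=25):
        if self.path != video_path:                     # new video -> new writer
            self.path = video_path
            if isinstance(self.writer, cv2.VideoWriter):
                self.writer.release()                   # release the previous writer
            h, w = frame.shape[:2]
            self.writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
        self.writer.write(frame)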
@ -344,6 +357,9 @@ def run(
         vid_stride=1,  # video frame-rate stride
         data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
 ):
+    '''
+    source: a video file or a list of images
+    '''
     source = str(source)
     # filename = os.path.split(source)[-1]

@ -355,6 +371,9 @@ def run(
     if is_url and is_file:
         source = check_file(source)  # download

     # spth = source.split('\\')[-2] + "_" + Path(source).stem
     save_dir = Path(project) / Path(source.split('\\')[-2] + "_" + str(Path(source).stem))
@ -440,8 +459,7 @@ def run(

-        p = Path(p)  # to Path
-        save_path = str(save_dir / p.name)  # im.jpg
         s += '%gx%g ' % im.shape[2:]  # print string

         # im0_ant = im0.copy()
@ -552,28 +570,33 @@ def run(

         # Save results (image and video with tracking)
         im0 = annotator.result()
-        save_path_img, ext = os.path.splitext(save_path)
+        p = Path(p)  # to Path
+        save_path = str(save_dir / p.name)  # im.jpg
         if save_img:
+            save_path_img, ext = os.path.splitext(save_path)
             if dataset.mode == 'image':
-                imgpath = save_path_img + f"_{dataset}.png"
+                imgpath = save_path_img + ".png"
             else:
                 imgpath = save_path_img + f"_{frameId}.png"

             cv2.imwrite(Path(imgpath), im0)

-        if vid_path[i] != save_path:  # new video
-            vid_path[i] = save_path
-            if isinstance(vid_writer[i], cv2.VideoWriter):
-                vid_writer[i].release()  # release previous video writer
-            if vid_cap:  # video
-                fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-            else:  # stream
-                fps, w, h = 30, im0.shape[1], im0.shape[0]
-            save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
-            vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-        vid_writer[i].write(im0)
+        if dataset.mode == 'video':
+            if vid_path[i] != save_path:  # new video
+                vid_path[i] = save_path
+                if isinstance(vid_writer[i], cv2.VideoWriter):
+                    vid_writer[i].release()  # release previous video writer
+                if vid_cap:  # video
+                    fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                    w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                    h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                else:  # stream
+                    fps, w, h = 30, im0.shape[1], im0.shape[0]
+                save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+            vid_writer[i].write(im0)

         # Print time (inference-only)
         LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
@ -672,40 +695,25 @@ def parse_opt():
     print_args(vars(opt))
     return opt

-def find_files_in_nested_dirs(root_dir):
+def find_video_imgs(root_dir):
     all_files = []
     extensions = ['.mp4']
     for dirpath, dirnames, filenames in os.walk(root_dir):
         for filename in filenames:
             file, ext = os.path.splitext(filename)
-            if ext in extensions:
+            if ext in IMG_FORMATS + VID_FORMATS:
                 all_files.append(os.path.join(dirpath, filename))
     return all_files

-print('=======')

-def main(opt):
-    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
-    optdict = vars(opt)

-    p = r"D:\datasets\ym"
-    p = r"D:\exhibition\images\153112511_0_seek_105.mp4"

-    optdict["project"] = r"D:\exhibition\result"

-    files = []
-    if os.path.isdir(p):
-        files.extend(sorted(glob.glob(os.path.join(p, '*.*'))))
-        optdict["source"] = files
-    elif os.path.isfile(p):
-        optdict["source"] = p

-    run(**optdict)

-def main_loop(opt):
+def main():
+    '''
+    run(): inference on a single image or a single video file; image sequences are not supported.
+    '''

     check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+    opt = parse_opt()
     optdict = vars(opt)

     # p = r"D:\datasets\ym\永辉测试数据_比对"
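A quick usage sketch (not from this commit) for the renamed helper above, assuming track_reid.py is importable as a module; the folder path is a placeholder.

# Sketch only: exercises find_video_imgs() and the IMG_FORMATS/VID_FORMATS tuples
# introduced earlier in this diff; the directory below is a placeholder.
from track_reid import find_video_imgs, IMG_FORMATS, VID_FORMATS

media = find_video_imgs(r"D:\exhibition\images")          # walks nested folders
videos = [f for f in media if f.lower().endswith(VID_FORMATS)]
images = [f for f in media if f.lower().endswith(IMG_FORMATS)]
print(f"{len(videos)} videos, {len(images)} images found")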
@ -714,28 +722,22 @@ def main_loop(opt):
     # p = r"D:\datasets\ym\实验室测试"
     # p = r"D:\datasets\ym\永辉双摄视频\新建文件夹"
     # p = r"\\192.168.1.28\share\测试_202406\0723\0723_2\20240723-112522_"
-    p = r"D:\datasets\ym\联华中环"
+    # p = r"D:\datasets\ym\联华中环"
+    p = r"D:\exhibition\images\153112511_0_seek_105.mp4"
+    # p = r"D:\exhibition\images\image"

-    k = 0
+    optdict["project"] = r"D:\exhibition\result"
     if os.path.isdir(p):
-        files = find_files_in_nested_dirs(p)
-        # files = [r"D:\datasets\ym\广告板遮挡测试\8\6926636301004_20240508-175300_back_addGood_70f754088050_215_17327712807.mp4",
-        #          r"D:\datasets\ym\videos\标记视频\test_20240402-173935_6920152400975_back_174037372.mp4",
-        #          r"D:\datasets\ym\videos\标记视频\test_20240402-173935_6920152400975_front_174037379.mp4",
-        #          r"D:\datasets\ym\广告板遮挡测试\8\2500441577966_20240508-175946_front_addGood_70f75407b7ae_155_17788571404.mp4"
-        #          ]
-        # files = [r"\\192.168.1.28\share\测试_202406\0723\0723_2\20240723-095838_\1_seek_193.mp4"]
+        files = find_video_imgs(p)
+        k = 0

         for file in files:
             optdict["source"] = file
             run(**optdict)

-            # k += 1
-            # if k == 10:
-            #     break
+            k += 1
+            if k == 1:
+                break
     elif os.path.isfile(p):
         optdict["source"] = p
         run(**optdict)
@ -744,10 +746,8 @@ def main_loop(opt):


 if __name__ == '__main__':
-    opt = parse_opt()
-    main(opt)
-    # main_loop(opt)
+    main()

Binary file not shown.
@ -35,9 +35,6 @@ def find_samebox_in_array(arr, target):


 def extract_data(datapath):
     '''
     Reads the data in 0/1_track.data
@ -71,7 +68,7 @@ def extract_data(datapath):

         boxes, feats, tboxes, tfeats = [], [], [], []

-        if line.find("box:") >= 0 and line.find("output_box:") < 0:
+        if line.find("box:") >= 0 and line.find("output_box:")<0 and line.find("out_boxes")<0:
            box = line[line.find("box:") + 4:].strip()
            # if len(box)==6:
            boxes.append(str_to_float_arr(box))
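To make the intent of the tightened filter explicit, a small hedged sketch (not from this commit) of how such a line would be parsed; the sample log line and the comma-separated layout are invented for illustration, only the keywords come from the diff.

# Sketch only: mirrors the "box:" / "output_box:" / "out_boxes" keywords filtered
# in extract_data(); the field separator and sample line are assumptions.
def parse_box_line(line: str):
    is_det_box = (line.find("box:") >= 0
                  and line.find("output_box:") < 0
                  and line.find("out_boxes") < 0)
    if not is_det_box:
        return None
    values = line[line.find("box:") + 4:].strip().split(",")
    return [float(v) for v in values if v.strip()]

print(parse_box_line("box: 12.0, 34.5, 56.0, 78.2, 0.91, 0"))  # -> [12.0, 34.5, 56.0, 78.2, 0.91, 0.0]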
@ -122,7 +119,9 @@ def extract_data(datapath):
     for line in lines:
         line = line.strip()  # strip the trailing newline and any surrounding whitespace
         if not line:  # skip empty lines
-            continue
+            tracking_flag = False
+            continue

         if tracking_flag:
             if line.find("tracking_") >= 0:
                 tracking_flag = False
@ -176,8 +175,10 @@ def read_tracking_output(filepath):
             boxes.append(data)
         if data.size == 256:
             feats.append(data)

-    assert(len(feats)==len(boxes)), f"{filepath}, len(feats)!=len(boxes)"
+    if len(feats) != len(boxes):
+        return np.array([]), np.array([])

     return np.array(boxes), np.array(feats)

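The behavioural change here is simply "mismatch returns empty arrays" instead of an AssertionError. A minimal sketch (not from this commit) of the new contract from the caller's side; the file path is a placeholder.

# Sketch only: shows how a caller is expected to treat the new empty-array return
# from read_tracking_output(); the path below is a placeholder.
from tracking.utils.read_data import read_tracking_output  # repo module, per this diff

boxes, feats = read_tracking_output(r"D:\datasets\ym\some_event\tracking_output.data")
if boxes.size == 0 or feats.size == 0:
    print("skip event: boxes/features missing or inconsistent")
else:
    assert len(boxes) == len(feats)        # guaranteed by the new guard
    print(f"{len(boxes)} tracked boxes with 256-d features")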
@ -331,7 +332,6 @@ def read_similar(filePath):
         line = line[:-1]
         Dict = {}

         if not line:
             if len(one2one_list): SimiDict['one2one'] = one2one_list
             if len(one2n_list): SimiDict['one2n'] = one2n_list
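A hedged consumer-side sketch (not from this commit): only the 'one2one'/'one2n' keys are taken from this diff; that read_similar() returns the dict, the record layout, and the file path are assumptions.

# Sketch only: consumes the dict produced by read_similar().
from tracking.utils.read_data import read_similar

simi = read_similar(r"D:\datasets\ym\some_event\process.data")    # placeholder path
for pair in simi.get('one2one', []):
    print(pair)                                # one 1:1 comparison record
print(f"{len(simi.get('one2n', []))} 1:n records")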
说明文档.txt  (17 changed lines)
@ -38,7 +38,7 @@
 The module has to be run in two steps:
 (1) runyolo()

-    This step calls the run_yolo module in imgs_inference.py
+    This step calls the run_yolo module in imgs_inference.py, which redefines the LoadImages class and rotates the images.
     Follow-up work:
     1). Merge the run_yolo module with track_reid.yolo_resnet_tracker
     2). Standardize the image file names
@ -126,14 +126,14 @@

 ./contrast
-    feat_similar.py
+    seqfeat_compare.py
         similarity_compare_sequence(root_dir)
             inputs:
                 root_dir: a folder containing the "subimgs" field; adjacent images in that folder are compared for similarity
         silimarity_compare()
             Purpose: compare the similarity of the images in the imgpaths folder

-    feat_select.py
+    input_getout_compare.py
         creatd_deletedBarcode_front(filepath)
             (1) Based on deletedBarcode.txt, build the take-out events and their corresponding put-in events, form the lists and keep them updated.
                 MatchList = [(getout_event, InputList), ...]
@ -145,6 +145,9 @@
         precision_compare(filepath, savepath)
             Reads the data in deletedBarcode.txt and deletedBarcodeTest.txt and compares the similarities

+        stdfeat_analys()

     genfeats.py
         get_std_barcodeDict(bcdpath, savepath)
             Purpose: generate and save single-key dicts of the form {barcode: [imgpath1, imgpath2, ...]}
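As a rough illustration (not from this commit) of the {barcode: [imgpath, ...]} layout described above, a hedged sketch; the directory layout, image suffix list, and pickle naming are assumptions.

# Sketch only: assumes bcdpath contains one sub-folder of images per barcode and
# that each single-key dict is pickled as <barcode>.pickle under savepath.
import os
import pickle

def build_barcode_dict(bcdpath, savepath):
    os.makedirs(savepath, exist_ok=True)
    for barcode in os.listdir(bcdpath):
        folder = os.path.join(bcdpath, barcode)
        if not os.path.isdir(folder):
            continue
        imgpaths = [os.path.join(folder, f) for f in os.listdir(folder)
                    if os.path.splitext(f)[1].lower() in ('.jpg', '.jpeg', '.png', '.bmp')]
        with open(os.path.join(savepath, f"{barcode}.pickle"), 'wb') as fh:
            pickle.dump({barcode: imgpaths}, fh)   # single-key dict, as described above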
@ -207,6 +210,7 @@

     one2one_contrast.py
+        Modified, not yet updated.
         6 paths in total:
         (1) stdSamplePath: raw images used to build the standard comparison feature set
         (2) stdBarcodePath: pickle storage of the raw-image paths of the standard feature set, {barcode: [imgpath1, imgpath2, ...]}
@ -283,6 +287,11 @@
         (3) featpath: calls inference_image() to generate and store a dict for each barcode

+    time_devide.py
+        runyolo()
+            Runs the run_yolo() module from imgs_inference.py, which redefines the LoadImages class and rotates the images.

+        show_seri()
