This commit is contained in:
王庆刚
2024-09-02 11:50:08 +08:00
parent 5109400a57
commit 0cc36ba920
34 changed files with 1401 additions and 275 deletions

contrast/feat_similar.py (new file, 131 lines)
View File

@@ -0,0 +1,131 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 9 10:36:45 2024
@author: ym
"""
import os
import cv2
import numpy as np
import torch
import sys
from scipy.spatial.distance import cdist
sys.path.append(r"D:\DetectTracking")
from tracking.trackers.reid.reid_interface import ReIDInterface
from tracking.trackers.reid.config import config as ReIDConfig
ReIDEncoder = ReIDInterface(ReIDConfig)
def inference_image(images):
batch_patches = []
patches = []
for d, img1 in enumerate(images):
img = img1[:, :, ::-1].copy() # the model expects RGB inputs
patch = ReIDEncoder.transform(img)
# patch = patch.to(device=self.device).half()
if str(ReIDEncoder.device) != "cpu":
patch = patch.to(device=ReIDEncoder.device).half()
else:
patch = patch.to(device=ReIDEncoder.device)
patches.append(patch)
if (d + 1) % ReIDEncoder.batch_size == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
features = np.zeros((0, ReIDEncoder.embedding_size))
for patches in batch_patches:
pred = ReIDEncoder.model(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
return features
def similarity_compare(root_dir):
'''
In root_dir, folders whose names contain "subimgs" hold the cropped sub-images.
Purpose: compare the similarity between sub-images of adjacent frames.
'''
all_files = []
extensions = ['.png', '.jpg']
for dirpath, dirnames, filenames in os.walk(root_dir):
filepaths = []
for filename in filenames:
if os.path.basename(dirpath).find('subimgs') < 0:
continue
file, ext = os.path.splitext(filename)
if ext in extensions:
imgpath = os.path.join(dirpath, filename)
filepaths.append(imgpath)
nf = len(filepaths)
if nf==0:
continue
fnma = os.path.basename(filepaths[0]).split('.')[0]
imga = cv2.imread(filepaths[0])
ha, wa = imga.shape[:2]
for i in range(1, nf):
fnmb = os.path.basename(filepaths[i]).split('.')[0]
imgb = cv2.imread(filepaths[i])
hb, wb = imgb.shape[:2]
feats = inference_image((imga, imgb))
similar = 1 - np.maximum(0.0, cdist(feats, feats, metric='cosine'))
h, w = max((ha, hb)), max((wa, wb))
img = np.zeros((h, 2*w, 3), np.uint8)
img[0:ha, 0:wa], img[0:hb, w:(w+wb)] = imga, imgb
linewidth = max(round((h+2*w)/2 * 0.001), 2)
cv2.putText(img,
text=f'{similar[0,1]:.2f}', # Text string to be drawn
org=(max(w-20, 10), h-10), # Bottom-left corner of the text string
fontFace=0, # Font type
fontScale=linewidth/3, # Font scale factor
color=(0, 0, 255), # Text color
thickness=linewidth, # Thickness of the lines used to draw a text
lineType=cv2.LINE_AA, # Line type
)
spath = os.path.join(dirpath, 's'+fnma+'-vs-'+fnmb+'.png')
cv2.imwrite(spath, img)
fnma = os.path.basename(filepaths[i]).split('.')[0]
imga = imgb.copy()
ha, wa = imga.shape[:2]
return
def main():
root_dir = r"D:\contrast\dataset\result\20240723-112242_6923790709882"
try:
similarity_compare(root_dir)
except Exception as e:
print(f'Error: {e}')
if __name__ == '__main__':
main()
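# Editor's note (not part of the commit): a minimal, self-contained sketch of how
# inference_image() pairs with scipy's cdist; similarity_compare() does the same
# computation per adjacent-frame pair. The image paths are hypothetical.
def _similarity_sketch(patha=r"D:\samples\a.png", pathb=r"D:\samples\b.png"):
    imga, imgb = cv2.imread(patha), cv2.imread(pathb)
    feats = inference_image((imga, imgb))  # (2, embedding_size) ReID features
    # cosine distance -> similarity in [0, 1]; negative distances are clipped
    similar = 1 - np.maximum(0.0, cdist(feats, feats, metric='cosine'))
    return similar[0, 1]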

View File

@@ -172,7 +172,10 @@ def run(
     if is_url and is_file:
         source = check_file(source)  # download
-    save_dir = Path(project) / Path(source).stem
+    # spth = source.split('\\')[-2] + "_" + Path(source).stem
+    save_dir = Path(project) / Path(source.split('\\')[-2] + "_" + str(Path(source).stem))
+    # save_dir = Path(project) / Path(source).stem
     if save_dir.exists():
         print(Path(source).stem)
         # return
@@ -387,6 +390,8 @@ def run(
             # Print time (inference-only)
             LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+    if track_boxes.size == 0:
+        return
     ## ======================================================================== written by WQG
     ## track_boxes: Array, [x1, y1, x2, y2, track_id, score, cls, frame_index, box_id]
@@ -397,7 +402,7 @@ def run(
     filename = os.path.split(save_path_img)[-1]
     '''======================== 1. save in './run/detect/' ===================='''
-    if source.find("front") >= 0:
+    if source.find("front") >= 0 or Path(source).stem.split('_')[0] == '1':
         carttemp = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png")
     else:
         carttemp = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png")
@@ -516,10 +521,11 @@ def main_loop(opt):
     optdict = vars(opt)
     # p = r"D:\datasets\ym\永辉测试数据_比对"
-    p = r"D:\datasets\ym\广告板遮挡测试\8"
+    # p = r"D:\datasets\ym\广告板遮挡测试\8"
     # p = r"D:\datasets\ym\videos\标记视频"
     # p = r"D:\datasets\ym\实验室测试"
     # p = r"D:\datasets\ym\永辉双摄视频\新建文件夹"
+    p = r"\\192.168.1.28\share\测试_202406\0723\0723_2\20240723-112522_"
     k = 0
     if os.path.isdir(p):
@@ -531,16 +537,16 @@ def main_loop(opt):
         #          r"D:\datasets\ym\广告板遮挡测试\8\2500441577966_20240508-175946_front_addGood_70f75407b7ae_155_17788571404.mp4"
         #          ]
-        files = [r"D:\datasets\ym\广告板遮挡测试\8\6907149227609_20240508-174733_back_returnGood_70f754088050_425_17327712807.mp4"]
+        # files = [r"\\192.168.1.28\share\测试_202406\0723\0723_2\20240723-095838_\1_seek_193.mp4"]
         for file in files:
             optdict["source"] = file
             run(**optdict)
-            k += 1
-            if k == 1:
-                break
+            # k += 1
+            # if k == 10:
+            #     break
     elif os.path.isfile(p):
         optdict["source"] = p
         run(**vars(opt))

View File

@@ -346,14 +346,6 @@ def performance_evaluate(all_list, isshow=False):
     return errpairs, corrpairs, err_similarity, correct_similarity
-    return errpairs, corrpairs, err_similarity, correct_similarity
 def contrast_analysis(del_barcode_file, basepath, savepath, saveimgs=False):
@@ -417,21 +409,20 @@ def contrast_loop(fpath):
     # plt2.savefig(os.path.join(savepath, file+'_hist.png'))
     # plt.close()
 def main():
     fpath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\other'
     contrast_loop(fpath)
 def main1():
-    del_barcode_file = 'D:/contrast/dataset/compairsonResult/deletedBarcode_20240709_pm.txt'
-    basepath = r'D:\contrast\dataset\1_to_n\709'
+    del_barcode_file = r'\\192.168.1.28\share\测试_202406\709\deletedBarcode.txt'
+    basepath = r'\\192.168.1.28\share\测试_202406\709'
     savepath = r'D:\contrast\dataset\result'
     try:
         relative_path = contrast_analysis(del_barcode_file, basepath, savepath)
     except Exception as e:
         print(f'Error Type: {e}')
 if __name__ == '__main__':

View File

@@ -0,0 +1,332 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 30 17:53:03 2024
1. 确认在相同CamerType下track.data 中 CamerID 项数量 = 图像数 = 帧ID数 = 最大帧ID
2. 读取0/1_tracking_output.data 中数据boxes、featslen(boxes)=len(feats)
帧ID约束
3. 优先选择前摄
4. 保存图像数据
5. 一次购物事件类型
shopEvent: {barcode:
type: getout, input
front_traj:[{imgpath: str,
box: arrar(1, 9),
feat: array(1, 256)
}]
back_traj: [{imgpath: str,
box: arrar(1, 9),
feat: array(1, 256)
}]
}
@author: ym
"""
import numpy as np
import cv2
import os
import sys
import json
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import extract_data, read_tracking_output, read_deletedBarcode_file
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
def creat_shopping_event(basepath):
eventList = []
'''Part 1: build the list of put-in (input) shopping events'''
k = 0
for filename in os.listdir(basepath):
# filename = "20240723-155413_6904406215720"
'''Each filename corresponds to one shopping event'''
filepath = os.path.join(basepath, filename)
'''================ 0. Check that filename and filepath are correct and valid ================'''
nmlist = filename.split('_')
if filename.find('2024')<0 or len(nmlist)!=2 or len(nmlist[0])!=15 or len(nmlist[1])<11:
continue
if not os.path.isdir(filepath): continue
print(f"Event name: {filename}")
'''================ 1. Build the event-description dict (tentatively 9 items) ==============='''
event = {}
event['barcode'] = nmlist[1]
event['type'] = 'input'
event['filepath'] = filepath
event['back_imgpaths'] = []
event['front_imgpaths'] = []
event['back_boxes'] = np.empty((0, 9), dtype=np.float64)
event['front_boxes'] = np.empty((0, 9), dtype=np.float64)
event['back_feats'] = np.empty((0, 256), dtype=np.float64)
event['front_feats'] = np.empty((0, 256), dtype=np.float64)
# event['feats_compose'] = np.empty((0, 256), dtype=np.float64)
# event['feats_select'] = np.empty((0, 256), dtype=np.float64)
'''================= 1. Read the .data files ============================='''
for dataname in os.listdir(filepath):
# filename = '1_track.data'
datapath = os.path.join(filepath, dataname)
if not os.path.isfile(datapath): continue
CamerType = dataname.split('_')[0]
''' 3.1 Read the data in 0/1_track.data (skipped for now)'''
# if dataname.find("_track.data")>0:
#     bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(datapath)
''' 3.2 Read the data in 0/1_tracking_output.data'''
if dataname.find("_tracking_output.data")>0:
tracking_output_boxes, tracking_output_feats = read_tracking_output(datapath)
if len(tracking_output_boxes) != len(tracking_output_feats): continue
if CamerType == '0':
event['back_boxes'] = tracking_output_boxes
event['back_feats'] = tracking_output_feats
elif CamerType == '1':
event['front_boxes'] = tracking_output_boxes
event['front_feats'] = tracking_output_feats
# '''1.1 Choose how the event's features are represented'''
# bk_feats = event['back_feats']
# ft_feats = event['front_feats']
# feats_compose = np.empty((0, 256), dtype=np.float64)
# if len(ft_feats):
#     feats_compose = np.concatenate((feats_compose, ft_feats), axis=0)
# if len(bk_feats):
#     feats_compose = np.concatenate((feats_compose, bk_feats), axis=0)
# event['feats_compose'] = feats_compose
# '''3. Build the front-camera features'''
# if len(ft_feats):
#     event['feats_select'] = ft_feats
'''================ 2. Collect the image file paths and sort them by frame ID ============='''
frontImgs, frontFid = [], []
backImgs, backFid = [], []
for imgname in os.listdir(filepath):
name, ext = os.path.splitext(imgname)
if ext not in IMG_FORMAT or name.find('frameId')<0: continue
CamerType = name.split('_')[0]
frameId = int(name.split('_')[3])
imgpath = os.path.join(filepath, imgname)
if CamerType == '0':
backImgs.append(imgpath)
backFid.append(frameId)
if CamerType == '1':
frontImgs.append(imgpath)
frontFid.append(frameId)
frontIdx = np.argsort(np.array(frontFid))
backIdx = np.argsort(np.array(backFid))
'''2.1 Build front/back image-path lists sorted by frame ID'''
frontImgs = [frontImgs[i] for i in frontIdx]
backImgs = [backImgs[i] for i in backIdx]
'''2.2 Add the front/back image paths to the event dict'''
bfid = event['back_boxes'][:, 7].astype(np.int64)
ffid = event['front_boxes'][:, 7].astype(np.int64)
if len(bfid) and max(bfid) <= len(backImgs):
event['back_imgpaths'] = [backImgs[i-1] for i in bfid]
if len(ffid) and max(ffid) <= len(frontImgs):
event['front_imgpaths'] = [frontImgs[i-1] for i in ffid]
'''================ 3. Check the current event's validity and append it to the event list =========='''
condt1 = len(event['back_imgpaths'])==0 or len(event['front_imgpaths'])==0
condt2 = len(event['front_feats'])==0 and len(event['back_feats'])==0
if condt1 or condt2:
print(f" Error, condt1: {condt1}, condt2: {condt2}")
continue
eventList.append(event)
# k += 1
# if k==1:
# continue
'''Part 2: build the list of take-out (getout) events -- not processed for now'''
# delepath = os.path.join(basepath, 'deletedBarcode.txt')
# bcdList = read_deletedBarcode_file(delepath)
# for slist in bcdList:
# getoutFold = slist['SeqDir'].strip()
# getoutPath = os.path.join(basepath, getoutFold)
# '''The take-out event folder does not exist; skip this iteration'''
# if not os.path.exists(getoutPath) and not os.path.isdir(getoutPath):
# continue
# ''' Build the take-out event dict '''
# event = {}
# event['barcode'] = slist['Deleted'].strip()
# event['type'] = 'getout'
# event['basepath'] = getoutPath
return eventList
def get_std_barcodeDict(bcdpath):
stdBlist = []
for filename in os.listdir(bcdpath):
filepath = os.path.join(bcdpath, filename)
if not os.path.isdir(filepath) or not filename.isdigit(): continue
stdBlist.append(filename)
bcdpaths = [(barcode, os.path.join(bcdpath, barcode)) for barcode in stdBlist]
stdBarcodeDict = {}
for barcode, bpath in bcdpaths:
stdBarcodeDict[barcode] = []
for root, dirs, files in os.walk(bpath):
imgpaths = []
if "base" in dirs:
broot = os.path.join(root, "base")
for imgname in os.listdir(broot):
imgpath = os.path.join(broot, imgname)
_, ext = os.path.splitext(imgpath)
if ext not in IMG_FORMAT: continue
imgpaths.append(imgpath)
stdBarcodeDict[barcode].extend(imgpaths)
break
else:
for imgname in files:
imgpath = os.path.join(root, imgname)
_, ext = os.path.splitext(imgpath)
if ext not in IMG_FORMAT: continue
imgpaths.append(imgpath)
stdBarcodeDict[barcode].extend(imgpaths)
with open('stdBarcodeDict.json', 'w', encoding='utf-8') as f:
json.dump(stdBarcodeDict, f)
return stdBarcodeDict
def one2one_test(filepath):
savepath = r'\\192.168.1.28\share\测试_202406\contrast'
'''Get the barcode list'''
bcdpath = r'\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_1771'
stdBarcodeDict = get_std_barcodeDict(bcdpath)
eventList = creat_shopping_event(filepath)
print("=========== eventList have generated! ===========")
barcodeDict = {}
for event in eventList:
'''9 items: barcode, type, filepath, back_imgpaths, front_imgpaths,
back_boxes, front_boxes, back_feats, front_feats
'''
barcode = event['barcode']
if barcode not in stdBarcodeDict.keys():
continue
if len(event.get('feats_select', [])):
event_feats = event['feats_select']
elif len(event['back_feats']):
event_feats = event['back_feats']
else:
continue
std_bcdpath = os.path.join(bcdpath, barcode)
for root, dirs, files in os.walk(std_bcdpath):
if "base" in files:
std_bcdpath = os.path.join(root, "base")
break
'''Save the trajectory sub-images of one shopping event'''
basename = os.path.basename(event['filepath'])
spath = os.path.join(savepath, basename)
if not os.path.exists(spath):
os.makedirs(spath)
cameras = ('front', 'back')
for camera in cameras:
if camera == 'front':
boxes = event['front_boxes']
imgpaths = event['front_imgpaths']
else:
boxes = event['back_boxes']
imgpaths = event['back_imgpaths']
for i, box in enumerate(boxes):
x1, y1, x2, y2, tid, score, cls, fid, bid = box
imgpath = imgpaths[i]
image = cv2.imread(imgpath)
subimg = image[int(y1/2):int(y2/2), int(x1/2):int(x2/2), :]
camerType, timeTamp, _, frameID = os.path.basename(imgpath).split('.')[0].split('_')
subimgName = f"{camerType}_{tid}_fid({fid}, {frameID}).png"
subimgPath = os.path.join(spath, subimgName)
cv2.imwrite(subimgPath, subimg)
print(f"Image saved: {basename}")
def main():
fplist = [r'\\192.168.1.28\share\测试_202406\0723\0723_1',
r'\\192.168.1.28\share\测试_202406\0723\0723_2',
# r'\\192.168.1.28\share\测试_202406\0723\0723_3',
r'\\192.168.1.28\share\测试_202406\0722\0722_01',
r'\\192.168.1.28\share\测试_202406\0722\0722_02'
]
for filepath in fplist:
one2one_test(filepath)
# for filepath in fplist:
# try:
# one2one_test(filepath)
# except Exception as e:
# print(f'{filepath}, Error: {e}')
if __name__ == '__main__':
main()
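# Editor's sketch (not part of the commit): the "prefer the front camera"
# feature selection described in the module docstring, written against the
# event keys built by creat_shopping_event(). The helper name is illustrative.
def _select_event_feats(event):
    """Return front-camera features when available, else rear-camera ones."""
    if len(event['front_feats']):
        return event['front_feats']
    if len(event['back_feats']):
        return event['back_feats']
    return None  # no usable features; callers should skip such an event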

View File

@@ -96,7 +96,7 @@ class Track:
         self.isCornpoint = False
         self.imgshape = imgshape
-        self.isBorder = False
+        # self.isBorder = False
         # self.state = MoveState.Unknown
         '''Start-frame and end-frame IDs of the track'''
@@ -157,10 +157,12 @@ class Track:
     def compute_cornpts_feats(self):
         '''
         '''
+        # print(f"TrackID: {self.tid}")
         trajectory = []
         trajlens = []
         trajdist = []
         trajrects = []
+        trajrects_wh = []
         for k in range(5):
             # diff_xy2 = np.power(np.diff(self.cornpoints[:, 2*k:2*(k+1)], axis = 0), 2)
             # trajlen = np.sum(np.sqrt(np.sum(diff_xy2, axis = 1)))
@@ -182,12 +184,17 @@ class Track:
             rect[0]: rotation angle, in (-90°, 0]
             '''
             rect = cv2.minAreaRect(X.astype(np.int64))
+            rect_wh = max(rect[1])
+            trajrects_wh.append(rect_wh)
             trajrects.append(rect)
         self.trajectory = trajectory
         self.trajlens = trajlens
         self.trajdist = trajdist
         self.trajrects = trajrects
+        self.trajrects_wh = trajrects_wh
@@ -198,12 +205,17 @@ class Track:
        - minimum trajectory length: trajlen_min
        - minimum trajectory Euclidean distance: trajdist_max
         '''
-        idx1 = self.trajlens.index(max(self.trajlens))
+        # idx1 = self.trajlens.index(max(self.trajlens))
+        idx1 = self.trajrects_wh.index(max(self.trajrects_wh))
         trajmax = self.trajectory[idx1]
         trajlen_max = self.trajlens[idx1]
         trajdist_max = self.trajdist[idx1]
         if not self.isCornpoint:
-            idx2 = self.trajlens.index(min(self.trajlens))
+            # idx2 = self.trajlens.index(min(self.trajlens))
+            idx2 = self.trajrects_wh.index(min(self.trajrects_wh))
             trajmin = self.trajectory[idx2]
             trajlen_min = self.trajlens[idx2]
             trajdist_min = self.trajdist[idx2]
@@ -284,7 +296,7 @@ class Track:
        camerType: back  = rear camera
                   front = front camera
         '''
-        if camerType=="front":
+        if camerType=="back":
             incart = cv2.imread("./shopcart/cart_tempt/incart.png", cv2.IMREAD_GRAYSCALE)
             outcart = cv2.imread("./shopcart/cart_tempt/outcart.png", cv2.IMREAD_GRAYSCALE)
         else:
@@ -487,6 +499,14 @@ class doTracks:
             blist = [b for b in alist]
             alist = []
             for btrack in blist:
+                # afids = []
+                # for track in cur_list:
+                #     afids.extend(list(track.boxes[:, 7].astype(np.int_)))
+                # bfids = btrack.boxes[:, 7].astype(np.int_)
+                # interfid = set(afids).intersection(set(bfids))
+                # if len(interfid):
+                #     print("wait!!!")
+                # if track_equal_track(atrack, btrack) and len(interfid)==0:
                 if track_equal_track(atrack, btrack):
                     cur_list.append(btrack)
                 else:

View File

@@ -155,6 +155,7 @@ class doBackTracks(doTracks):
     def merge_tracks(self, Residual):
         """
         Merge targets that have different IDs but may be the same product.
+        Identical to the function in dotrack_front.py; could be merged into the base class.
         """
         mergedTracks = self.base_merge_tracks(Residual)

View File

@@ -47,6 +47,7 @@ class doFrontTracks(doTracks):
         tracks_free = [t for t in tracks if t.frnum>1 and t.is_freemove()]
         self.FreeMove.extend(tracks_free)
+        tracks = self.sub_tracks(tracks, tracks_free)
         # [self.associate_with_hand(htrack, gtrack) for htrack in hand_tracks for gtrack in tracks]
         '''Iterative track merging'''
@@ -126,6 +127,7 @@ class doFrontTracks(doTracks):
     def merge_tracks(self, Residual):
         """
         Merge targets that have different IDs but may be the same product.
+        Identical to the function in dotrack_back.py; could be merged into the base class.
         """
         mergedTracks = self.base_merge_tracks(Residual)

View File

@@ -165,7 +165,7 @@ class frontTrack(Track):
         '''Case 2: the center point moves upward'''
         ## The product's center moves upward, there is no associated hand track, and it is not a left/right border corner point
-        condt_b = condt0 and len(self.Hands)==0 and y0[-1] < y0[0] and (not self.is_edge_cornpoint())
+        condt_b = condt0 and len(self.Hands)==0 and y0[-1] < y0[0] and (not self.is_edge_cornpoint()) and min(y0)>self.CART_HIGH_THRESH1
         '''Case 3: the product is inside the cart but its motion direction is erratic'''
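# Editor's sketch (not part of the commit): the up-moving "case 2" test above,
# decomposed for readability. Attribute names follow frontTrack; treating
# CART_HIGH_THRESH1 as the cart's upper boundary in image coordinates is an
# assumption drawn from context, not stated in the source.
def _is_case2_up_move(track, condt0, y0):
    moved_up = y0[-1] < y0[0]
    no_hand = len(track.Hands) == 0
    above_cart_top = min(y0) > track.CART_HIGH_THRESH1
    return condt0 and no_hand and moved_up and (not track.is_edge_cornpoint()) and above_cart_top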

View File

@@ -619,7 +619,6 @@ def match_evaluate(filename = r'./matching/featdata/MatchDict.pkl'):
 def have_tracked():
-    featdir = r"./data/trackfeats"
     trackdir = r"./data/tracks"
     # =============================================================================
@@ -634,35 +633,25 @@ def have_tracked():
     MatchingDict = {}
     k, gt = 0, Profile()
-    for filename in os.listdir(featdir):
+    for filename in os.listdir(trackdir):
         file, ext = os.path.splitext(filename)
         # if file not in FileList: continue
         if file.find('20240508')<0: continue
-        if file.find('17327712807')<0: continue
-        trackpath = os.path.join(trackdir, file + ".npy")
-        featpath = os.path.join(featdir, filename)
-        bboxes = np.load(trackpath)
-        features_dict = np.load(featpath, allow_pickle=True)
+        filepath = os.path.join(trackdir, filename)
+        tracksDict = np.load(filepath, allow_pickle=True)
+        bboxes = tracksDict['TrackBoxes']
         with gt:
             if filename.find("front") >= 0:
-                vts = doFrontTracks(bboxes, features_dict)
+                vts = doFrontTracks(bboxes, tracksDict)
                 vts.classify()
-                plt = plot_frameID_y2(vts)
-                savedir = save_dir.joinpath(f'{file}_y2.png')
-                plt.savefig(savedir)
-                plt.close()
             elif filename.find("back") >= 0:
-                vts = doBackTracks(bboxes, features_dict)
+                vts = doBackTracks(bboxes, tracksDict)
                 vts.classify()
-                edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
-                draw_all_trajectories(vts, edgeline, save_dir, filename)
         print(file+f" need time: {gt.dt:.2f}s")
         elements = file.split('_')
@@ -691,7 +680,7 @@ def have_tracked():
             box = boxes[i, :]
             tid, fid, bid = int(box[4]), int(box[7]), int(box[8])
-            feat_dict = features_dict[fid]
+            feat_dict = tracksDict[fid]
             feature = feat_dict[bid]
             img = feat_dict[f'{bid}_img']

View File

@@ -30,7 +30,14 @@ def compute_similar(feat1, feat2):
 def update_event(datapath):
-    '''One shopping event, containing 8 keys'''
+    '''One shopping event, containing 8 keys:
+        back_sole_boxes: rear-camera boxes
+        front_sole_boxes: front-camera boxes
+        back_sole_feats: rear-camera features
+        front_sole_feats: front-camera features
+        feats_compose: front- and rear-camera features merged
+        feats_select: feature selection, preferring the front camera
+    '''
     event = {}
     # event['front_tracking_boxes'] = []
     # event['front_tracking_feats'] = {}
@@ -157,6 +164,10 @@ def update_event(datapath):
 def creatd_deletedBarcode_front(filepath):
+    '''
+    Generate deletedBarcodeTest.txt
+    '''
     # filepath = r'\\192.168.1.28\share\测试_202406\0723\0723_1\deletedBarcode.txt'
     basepath, _ = os.path.split(filepath)
@@ -281,7 +292,7 @@ def creatd_deletedBarcode_front(filepath):
     print('Step 3: Similarity computation Done!')
     wpath = os.path.split(filepath)[0]
-    wfile = os.path.join(wpath, 'deletedBarcodeTest_x.txt')
+    wfile = os.path.join(wpath, 'deletedBarcodeTest.txt')
     with open(wfile, 'w', encoding='utf-8') as file:
         for result in results:
@@ -299,11 +310,14 @@ def creatd_deletedBarcode_front(filepath):
     print('Step 4: File writing Done!')
-def compute_precision(filepath, savepath):
+def precision_compare(filepath, savepath):
+    '''
+    1. The similarity in deletedBarcode.txt is computed by the on-site algorithm with front- and rear-camera trajectory features merged.
+    2. The 3 similarities in deletedBarcodeTest.txt are computed, in order, as:
+        (1) on-site algorithm, front- and rear-camera trajectory features merged;
+        (2) local algorithm, front- and rear-camera trajectory features merged;
+        (3) local algorithm, front-camera features preferred.
+    '''
     fpath = os.path.split(filepath)[0]
     _, basefile = os.path.split(fpath)
@@ -336,11 +350,16 @@ def compute_precision(filepath, savepath):
     plt1.title(basefile + ', front')
     plt2.savefig(os.path.join(savepath, basefile+'_pr_front.png'))
     plt2.close()
 def main():
+    '''
+    1. Generate deletedBarcodeTest.txt.
+    2. Compare precision under the different feature-selection schemes.
+    '''
     fplist = [#r'\\192.168.1.28\share\测试_202406\0723\0723_1\deletedBarcode.txt',
               # r'\\192.168.1.28\share\测试_202406\0723\0723_2\deletedBarcode.txt',
-              # r'\\192.168.1.28\share\测试_202406\0723\0723_3\deletedBarcode.txt',
+              r'\\192.168.1.28\share\测试_202406\0723\0723_3\deletedBarcode.txt',
               # r'\\192.168.1.28\share\测试_202406\0722\0722_01\deletedBarcode.txt',
               # r'\\192.168.1.28\share\测试_202406\0722\0722_02\deletedBarcode.txt',
               # r'\\192.168.1.28\share\测试_202406\0719\719_1\deletedBarcode.txt',
@@ -376,25 +395,19 @@ def main():
               # r'\\192.168.1.28\share\测试_202406\627\deletedBarcode.txt',
               ]
-    fplist = [#r'\\192.168.1.28\share\测试_202406\0723\0723_1\deletedBarcode.txt',
-              # r'\\192.168.1.28\share\测试_202406\0723\0723_3\deletedBarcode.txt',
-              r'\\192.168.1.28\share\测试_202406\0723\0723_3\deletedBarcodeTest.txt',
-              ]
     savepath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\illustration'
     for filepath in fplist:
-        print(filepath)
-        # creatd_deletedBarcode_front(filepath)
-        compute_precision(filepath, savepath)
-        # try:
-        #     creatd_deletedBarcode_front(filepath)
-        #     compute_pres(filepath, savepath)
-        # except Exception as e:
-        #     print(f'{filepath}, Error: {e}')
+        try:
+            # 1. Generate the deletedBarcodeTest.txt file
+            creatd_deletedBarcode_front(filepath)
+            # 2. Requires deletedBarcode.txt and deletedBarcodeTest.txt to exist in the directory
+            precision_compare(filepath, savepath)
+        except Exception as e:
+            print(f'{filepath}, Error: {e}')
 if __name__ == '__main__':
     main()
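# Editor's sketch (not part of the commit): the feature composition behind the
# similarity variants listed in precision_compare()'s docstring. Helper names
# are illustrative; it assumes numpy as np and scipy.spatial.distance.cdist are
# imported, and (N, 256) feature arrays as used elsewhere in this module.
def _compose_feats(front_feats, back_feats):
    """Merge front- and rear-camera track features (the 'merged' variants)."""
    feats = [f for f in (front_feats, back_feats) if len(f)]
    if not feats:
        return np.empty((0, 256), dtype=np.float64)
    return np.concatenate(feats, axis=0)

def _event_similarity(event_feats, barcode_feats):
    """Max cosine similarity between event features and a barcode gallery."""
    sim = 1 - np.maximum(0.0, cdist(event_feats, barcode_feats, metric='cosine'))
    return float(sim.max())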

View File

@@ -25,110 +25,14 @@ from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
 from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output
 from contrast_analysis import contrast_analysis
 from tracking.utils.annotator import TrackAnnotator
 W, H = 1024, 1280
 Mode = 'front'  #'back'
 ImgFormat = ['.jpg', '.jpeg', '.png', '.bmp']
-def video2imgs(path):
-    vpath = os.path.join(path, "videos")
-    k = 0
-    have = False
-    for filename in os.listdir(vpath):
-        file, ext = os.path.splitext(filename)
-        imgdir = os.path.join(path, file)
-        if os.path.exists(imgdir):
-            continue
-        else:
-            os.mkdir(imgdir)
-        vfile = os.path.join(vpath, filename)
-        cap = cv2.VideoCapture(vfile)
-        i = 0
-        while True:
-            ret, frame = cap.read()
-            if not ret:
-                break
-            i += 1
-            imgp = os.path.join(imgdir, file+f"_{i}.png")
-            cv2.imwrite(imgp, frame)
-            print(filename+f": {i}")
-        cap.release()
-        k+=1
-        if k==1000:
-            break
-def draw_boxes():
-    datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data'
-    VideosData = read_tracker_input(datapath)
-    bboxes = VideosData[0][0]
-    ffeats = VideosData[0][1]
-    videopath = r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4"
-    cap = cv2.VideoCapture(videopath)
-    i = 0
-    while True:
-        ret, frame = cap.read()
-        if not ret:
-            break
-        annotator = Annotator(frame.copy(), line_width=3)
-        boxes = bboxes[i]
-        for *xyxy, conf, cls in reversed(boxes):
-            label = f'{int(cls)}: {conf:.2f}'
-            color = colors(int(cls), True)
-            annotator.box_label(xyxy, label, color=color)
-        img = annotator.result()
-        imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1)
-        cv2.imwrite(imgpath, img)
-        print(f"Output: {i}")
-        i += 1
-    cap.release()
-def read_imgs(imgspath, CamerType):
-    imgs, frmIDs = [], []
-    for filename in os.listdir(imgspath):
-        file, ext = os.path.splitext(filename)
-        flist = file.split('_')
-        if len(flist)==4 and ext in ImgFormat:
-            camID, frmID = flist[0], int(flist[-1])
-            imgpath = os.path.join(imgspath, filename)
-            img = cv2.imread(imgpath)
-            if camID==CamerType:
-                imgs.append(img)
-                frmIDs.append(frmID)
-    if len(frmIDs):
-        indice = np.argsort(np.array(frmIDs))
-        imgs = [imgs[i] for i in indice]
-    return imgs
-    pass
+'''Call tracking() to obtain each target's trajectory with the local tracking algorithm, so the local and on-site tracking algorithms can be compared.'''
 def init_tracker(tracker_yaml = None, bs=1):
     """
     Initialize tracker for object tracking during prediction.
@@ -177,38 +81,45 @@ def tracking(bboxes, ffeats):
     return TrackBoxes, TracksDict
-def do_tracker_tracking(fpath, save_dir):
-    bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath)
-    tboxes, feats_dict = tracking(bboxes, ffeats)
-    CamerType = os.path.basename(fpath).split('_')[0]
-    dirname = os.path.split(os.path.split(fpath)[0])[1]
-    if CamerType == '1':
-        vts = doFrontTracks(tboxes, feats_dict)
-        vts.classify()
-        plt = plot_frameID_y2(vts)
-        plt.savefig('front_y2.png')
-        # plt.close()
-    elif CamerType == '0':
-        vts = doBackTracks(tboxes, feats_dict)
-        vts.classify()
-        filename = dirname+'_' + CamerType
-        edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
-        draw_all_trajectories(vts, edgeline, save_dir, filename)
-    else:
-        print("Please check data file!")
+def read_imgs(imgspath, CamerType):
+    '''
+    inputs:
+        imgspath: directory of the image sequence
+        CamerType: camera type, 0 = rear camera, 1 = front camera
+    outputs:
+        imgs: the image sequence
+    Purpose:
+        Read the images in imgspath that match CamerType and sort them by frame index.
+        Called in do_tracking() to (1) read imgs and draw each target's trajectory boxes, and (2) obtain the subimgs.
+    '''
+    imgs, frmIDs = [], []
+    for filename in os.listdir(imgspath):
+        file, ext = os.path.splitext(filename)
+        flist = file.split('_')
+        if len(flist)==4 and ext in ImgFormat:
+            camID, frmID = flist[0], int(flist[-1])
+            imgpath = os.path.join(imgspath, filename)
+            img = cv2.imread(imgpath)
+            if camID==CamerType:
+                imgs.append(img)
+                frmIDs.append(frmID)
+    if len(frmIDs):
+        indice = np.argsort(np.array(frmIDs))
+        imgs = [imgs[i] for i in indice]
+    return imgs
 def do_tracking(fpath, savedir, event_name='images'):
     '''
-    fpath: path of the .data files output by each algorithm module
-    savedir: reproduction of each module's output for fpath
-    To analyse a specific video, fpath and savedir must be specified.
+    args:
+        fpath: path of the .data files output by each algorithm module
+        savedir: reproduction of each module's output for fpath;
+            to analyse a specific video, fpath and savedir must be specified
+    outputs:
+        img_tracking: comparison plot of the tracked trajectories and the local trajectory-analysis result
+        abimg: comparison plot of the on-site trajectory analysis and the trajectory-selection output
     '''
     # fpath = r'D:\contrast\dataset\1_to_n\709\20240709-102758_6971558612189\1_track.data'
     # savedir = r'D:\contrast\dataset\result\20240709-102843_6958770005357_6971558612189\error_6971558612189'
@@ -231,8 +142,10 @@ def do_tracking(fpath, savedir, event_name='images'):
     bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath)
     tracking_output_boxes, _ = read_tracking_output(tracking_output_path)
+    '''1.2 Generate each product's trajectory with the local tracking algorithm'''
+    # trackerboxes, tracker_feat_dict = tracking(bboxes, ffeats)
-    '''1.2 Build 2 folders: (1) images with boxes drawn; (2) sub-images of the boxes along the motion trajectories'''
+    '''1.3 Build 2 folders: (1) images with boxes drawn; (2) sub-images of the boxes along the motion trajectories'''
     save_dir = os.path.join(savedir, event_name)
     subimg_dir = os.path.join(savedir, event_name + '_subimgs')
     if not os.path.exists(save_dir):
@@ -241,8 +154,6 @@ def do_tracking(fpath, savedir, event_name='images'):
         os.makedirs(subimg_dir)
     '''2. Run the trajectory analysis and save before/after comparison plots'''
     traj_graphic = event_name + '_' + CamerType
     if CamerType == '1':
@@ -344,24 +255,30 @@ def do_tracking(fpath, savedir, event_name='images'):
 def tracking_simulate(eventpath, savepath):
     '''args:
-        eventpath: time folder
+        eventpath: event folder
         savepath: output folder
+        Iterates over eventpath.
     '''
-    '''1. Get the event name'''
-    event_names = os.path.basename(eventpath).strip().split('_')
-    if len(event_names)==2 and len(event_names[1])>=8:
-        enent_name = event_names[1]
-    elif len(event_names)==2 and len(event_names[1])==0:
-        enent_name = event_names[0]
-    else:
-        return
+# =============================================================================
+#     '''1. Get the event name'''
+#     event_names = os.path.basename(eventpath).strip().split('_')
+#     if len(event_names)==2 and len(event_names[1])>=8:
+#         enent_name = event_names[1]
+#     elif len(event_names)==2 and len(event_names[1])==0:
+#         enent_name = event_names[0]
+#     else:
+#         return
+# =============================================================================
+    enent_name = os.path.basename(eventpath)[:15]
     '''2. Read the data in 0/1_track.data in turn and run the simulation'''
     illu_tracking, illu_select = [], []
     for filename in os.listdir(eventpath):
         # filename = '1_track.data'
-        if filename.find("track.data") <= 0: continue
+        if filename.find("track.data") < 0: continue
         fpath = os.path.join(eventpath, filename)
         if not os.path.isfile(fpath): continue
@@ -451,7 +368,7 @@ def main_loop():
     '''2. Loop over the operation events: take-out, put-in, mismatch'''
     for eventpath in tuple_paths:
         try:
             tracking_simulate(eventpath, savepath)
         except Exception as e:
             print(f'Error! {eventpath}, {e}')
@@ -462,29 +379,29 @@ def main_loop():
 def main():
     '''
-    eventpath: path of the .data files; each .data file contains the outputs of every Pipeline module
-    savepath: two directory levels: level 1 holds the trajectory images; level 2 holds the image sequences corresponding to the .data files.
+    eventPaths: path of the .data files; each .data file contains the outputs of every Pipeline module
+    SavePath: two directory levels: level 1 holds the trajectory images; level 2 holds the image sequences corresponding to the .data files.
     '''
-    EventPaths = r'\\192.168.1.28\share\测试_202406\0723\0723_2'
-    SavePath = r'D:\contrast\dataset\result'
+    eventPaths = r'\\192.168.1.28\share\测试_202406\0723\0723_3'
+    savePath = r'D:\contrast\dataset\result'
     k=0
-    for pathname in os.listdir(EventPaths):
-        # pathname = "20240723-094731_6903148242797"
-        eventpath = os.path.join(EventPaths, pathname)
-        savepath = os.path.join(SavePath, pathname)
+    for pathname in os.listdir(eventPaths):
+        pathname = "20240723-163121_6925282237668"
+        eventpath = os.path.join(eventPaths, pathname)
+        savepath = os.path.join(savePath, pathname)
         if not os.path.exists(savepath):
             os.makedirs(savepath)
-        # tracking_simulate(eventpath, savepath)
-        try:
-            tracking_simulate(eventpath, savepath)
-        except Exception as e:
-            print(f'Error! {eventpath}, {e}')
-        # k += 1
-        # if k==10:
-        #     break
+        tracking_simulate(eventpath, savepath)
+        # try:
+        #     tracking_simulate(eventpath, savepath)
+        # except Exception as e:
+        #     print(f'Error! {eventpath}, {e}')
+        k += 1
+        if k==1:
+            break
 if __name__ == "__main__":

View File

@@ -0,0 +1,6 @@
5 images:
incart.png
outcart.png
incart_ftmp.png
outcart_ftmp.png
cartboarder.png

View File

@@ -36,10 +36,10 @@ def temp_add_boarder():
 def create_front_temp():
-    image = cv2.imread("image_front.png")
+    image = cv2.imread("./iCart4/b.png")
     Height, Width = image.shape[:2]
     gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    thresh, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY_INV)
+    thresh, binary = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV)
     board = cv2.bitwise_not(binary)
     contours, _ = cv2.findContours(board, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
@@ -48,12 +48,12 @@ def create_front_temp():
         img = np.zeros((Height, Width), dtype=np.uint8)
         cv2.drawContours(img, [cnt], -1, 255, 3)
         k += 1
-        cv2.imwrite(f"fronttemp_{k}.png", img)
+        cv2.imwrite(f"./iCart4/back{k}.png", img)
     imgshow = cv2.drawContours(image, contours, -1, (0,255,0), 3)
-    cv2.imwrite("board_ftmp_line.png", imgshow)
+    cv2.imwrite("./iCart4/board_back_line.png", imgshow)
-    # cv2.imwrite("4.png", board)
+    # cv2.imwrite("./iCart4/4.png", board)
     # cv2.imwrite("1.png", gray)
     # cv2.imwrite("2.png", binary)

Binary file not shown.

tracking/time_test.py (new file, 98 lines)
View File

@@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 09:39:42 2024
@author: ym
"""
import os
import time
import datetime
import numpy as np
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import extract_data, read_weight_timeConsuming
def main():
directory = r"\\192.168.1.28\share\测试_202406\0821\images"
TimeConsuming = []
DayHMS = []
for root, dirs, files in os.walk(directory):
if root.find('20240821') == -1: continue
for name in files:
if name.find('process.data') == -1: continue
datename = os.path.basename(root)[:15]
fpath = os.path.join(root, name)
WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(fpath)
try:
t1 = ProcessTimeDict['algroDoStart']          # time of the first frame processed by the algorithm
t2 = ProcessTimeDict['breakinFirst']          # time of the first break-in
t3 = ProcessTimeDict['algroLastFrame']        # time of the last frame processed by the algorithm
t4 = ProcessTimeDict['breakinLast']           # time of the last break-in
t5 = ProcessTimeDict['weightStablityTime']    # time at which the weight stabilised
wv = ProcessTimeDict['weightValue']           # weight value
t6 = ProcessTimeDict['YoloResnetTrackerEnd']  # time at which Yolo, Resnet and the tracker finished
t7 = ProcessTimeDict['trackingEnd']           # time at which trajectory analysis finished
t8 = ProcessTimeDict['contrastEnd']           # time at which the comparison finished
t9 = ProcessTimeDict['algroStartToEnd']       # algorithm start-to-end duration
t10 = ProcessTimeDict['weightstablityToEnd']  # weight stabilisation to algorithm end
t11 = ProcessTimeDict['frameEndToEnd']        # last frame to algorithm end
TimeConsuming.append((t1, t2, t3, t4, t5, wv, t6, t7, t8, t9, t10, t11))
DayHMS.append(datename)
except Exception as e:
print(f'Error! {datename}, {e}')
TimeConsuming = np.array(TimeConsuming, dtype = np.int64)
TimeTotal = np.concatenate((TimeConsuming,
TimeConsuming[:,4][:, None] - TimeConsuming[:,0][:, None],
TimeConsuming[:,4][:, None] - TimeConsuming[:,2][:, None]), axis=1)
tt = TimeTotal[:, 3]==0
TimeTotal0 = TimeTotal[tt]
DayHMS0 = [DayHMS[i] for i, ti in enumerate(tt) if ti]
TimeTotalMinus = TimeTotal[TimeTotal[:, 5]<0]
TimeTotalAdd = TimeTotal[TimeTotal[:, 5]>=0]
TimeTotalAdd0 = TimeTotalAdd[TimeTotalAdd[:,3] == 0]
TimeTotalAdd1 = TimeTotalAdd[TimeTotalAdd[:,3] != 0]
TimeTotalMinus0 = TimeTotalMinus[TimeTotalMinus[:,3] == 0]
TimeTotalMinus1 = TimeTotalMinus[TimeTotalMinus[:,3] != 0]
print(f"Total number is {len(TimeConsuming)}")
if __name__ == "__main__":
main()
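# Editor's sketch (not part of the commit): a named view of the column layout
# assembled above, so the two derived columns -- (weightStablityTime - algroDoStart)
# and (weightStablityTime - algroLastFrame) -- are explicit. Field order is assumed
# from how TimeConsuming and TimeTotal are built in main().
COLS = ('algroDoStart', 'breakinFirst', 'algroLastFrame', 'breakinLast',
        'weightStablityTime', 'weightValue', 'YoloResnetTrackerEnd',
        'trackingEnd', 'contrastEnd', 'algroStartToEnd',
        'weightstablityToEnd', 'frameEndToEnd',
        'stablityMinusDoStart', 'stablityMinusLastFrame')

def row_as_dict(row):
    """Map one TimeTotal row to its field names (order assumed as above)."""
    return dict(zip(COLS, row.tolist()))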

View File

@@ -163,7 +163,7 @@ class BOTSORT(BYTETracker):
     '''1. ReID similarity threshold: two box images below this value cannot be the same object; a reasonable confidence threshold still needs to be determined.
        2. The IoU constraint is a weak one, so iou_dists should be set to a fairly large value.
     '''
-    emb_dists_mask = (emb_dists > 0.65)
+    emb_dists_mask = (emb_dists > 0.9)
     iou_dists[emb_dists_mask] = 1
     emb_dists[iou_dists_mask] = 1
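# Editor's sketch (not part of the commit): the mutual gating this hunk tunes,
# written out as a standalone function. Shapes are (num_tracks, num_dets);
# iou_dists_mask (here derived from iou_thresh) and the min() fusion rule are
# assumptions based on context, not taken from the source.
import numpy as np

def gate_costs(iou_dists, emb_dists, emb_thresh=0.9, iou_thresh=0.5):
    iou_dists, emb_dists = iou_dists.copy(), emb_dists.copy()
    iou_dists_mask = iou_dists > iou_thresh   # assumed IoU gate
    emb_dists_mask = emb_dists > emb_thresh   # pairs too far apart in ReID space
    iou_dists[emb_dists_mask] = 1             # bar such pairs from IoU matching too
    emb_dists[iou_dists_mask] = 1
    return np.minimum(iou_dists, emb_dists)   # one plausible cost fusion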

View File

@@ -0,0 +1,462 @@
import torch
import torch.nn as nn
from tools.config import config as conf
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# from .utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, cam=False, bam=False):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.cam = cam
self.bam = bam
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
if self.cam:
if planes == 64:
self.globalAvgPool = nn.AvgPool2d(56, stride=1)
elif planes == 128:
self.globalAvgPool = nn.AvgPool2d(28, stride=1)
elif planes == 256:
self.globalAvgPool = nn.AvgPool2d(14, stride=1)
elif planes == 512:
self.globalAvgPool = nn.AvgPool2d(7, stride=1)
self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
self.sigmod = nn.Sigmoid()
if self.bam:
self.bam = SpatialAttention()
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
if self.cam:
ori_out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmod(out)
out = out.view(out.size(0), out.size(-1), 1, 1)
out = out * ori_out
if self.bam:
out = out*self.bam(out)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, cam=False, bam=False):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.cam = cam
self.bam = bam
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if self.cam:
if planes == 64:
self.globalAvgPool = nn.AvgPool2d(56, stride=1)
elif planes == 128:
self.globalAvgPool = nn.AvgPool2d(28, stride=1)
elif planes == 256:
self.globalAvgPool = nn.AvgPool2d(14, stride=1)
elif planes == 512:
self.globalAvgPool = nn.AvgPool2d(7, stride=1)
self.fc1 = nn.Linear(planes * self.expansion, round(planes / 4))
self.fc2 = nn.Linear(round(planes / 4), planes * self.expansion)
self.sigmod = nn.Sigmoid()
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
if self.cam:
ori_out = self.globalAvgPool(out)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.sigmod(out)
out = out.view(out.size(0), out.size(-1), 1, 1)
out = out * ori_out
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=conf.embedding_size, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None, scale=0.75):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, int(64*scale), layers[0])
self.layer2 = self._make_layer(block, int(128*scale), layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, int(256*scale), layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, int(512*scale), layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(int(512 * block.expansion*scale), num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# print('poolBefore', x.shape)
x = self.avgpool(x)
# print('poolAfter', x.shape)
x = torch.flatten(x, 1)
# print('fcBefore',x.shape)
x = self.fc(x)
# print('fcAfter',x.shape)
return x
def forward(self, x):
return self._forward_impl(x)
# def _resnet(arch, block, layers, pretrained, progress, **kwargs):
# model = ResNet(block, layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict, strict=False)
# return model
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
src_state_dict = state_dict
target_state_dict = model.state_dict()
skip_keys = []
# skip mismatch size tensors in case of pretraining
for k in src_state_dict.keys():
if k not in target_state_dict:
continue
if src_state_dict[k].size() != target_state_dict[k].size():
skip_keys.append(k)
for k in skip_keys:
del src_state_dict[k]
missing_keys, unexpected_keys = model.load_state_dict(src_state_dict, strict=False)
return model
def resnet14(pretrained=True, progress=True, **kwargs):
r"""ResNet-14 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 1, 1, 2], pretrained, progress,
**kwargs)
def resnet18(pretrained=True, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
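# Editor's sketch (not part of the commit): exercising the mismatch-tolerant
# loader above. With scale=0.75 the layer widths no longer match the ImageNet
# checkpoint, so _resnet() drops the mismatched tensors and loads the rest with
# strict=False; the output width comes from conf.embedding_size in tools.config.
if __name__ == "__main__":
    model = resnet18(pretrained=True, scale=0.75)
    model.eval()
    with torch.no_grad():
        emb = model(torch.randn(1, 3, 224, 224))
    print(emb.shape)  # -> (1, conf.embedding_size)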

View File

@@ -107,6 +107,10 @@ def have_tracked():
                 plt.savefig(savedir)
                 plt.close()
+                edgeline = cv2.imread("./shopcart/cart_tempt/board_ftmp_line.png")
+                img_tracking = draw_all_trajectories(vts, edgeline, save_dir, file, draw5p=True)
             else:
                 vts = doBackTracks(bboxes, TracksDict)
                 vts.classify()
@@ -114,7 +118,7 @@ def have_tracked():
                 save_subimgs(vts, file, TracksDict)
                 edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
-                draw_all_trajectories(vts, edgeline, save_dir, filename)
+                img_tracking = draw_all_trajectories(vts, edgeline, save_dir, file)
         print(file+f" need time: {gt.dt:.2f}s")
         k += 1

View File

@@ -114,7 +114,7 @@ def draw_all_trajectories(vts, edgeline, save_dir, file, draw5p=False):
             img = edgeline.copy()
             img = draw5points(track, img)
-            pth = trackpth.joinpath(f"{file}_{track.tid}.png")
+            pth = trackpth.joinpath(f"{file}_{track.tid}_.png")
             cv2.imwrite(str(pth), img)
     # for track in vts.Residual:
@@ -307,11 +307,13 @@ def draw5points(track, img):
     '''=============== index of the minimum trajectory length ===================='''
-    if track.isBorder:
+    trajlens = [int(t) for t in track.trajrects_wh]
+    if track.isCornpoint:
         idx = 0
     else:
         idx = trajlens.index(min(trajlens))

     '''=============== PCA ===================='''
     if trajlens[idx] > 12:
         X = cornpoints[:, 2*idx:2*(idx+1)]
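
The second hunk above picks the corner point whose trajectory is shortest and, when that trajectory is longer than 12, runs PCA on its (x, y) track. A minimal sketch of that step under stated assumptions: cornpoints is an (N, 10) array holding five corner-point trajectories and trajlens their per-corner lengths (both names come from the diff; the values here are invented):

import numpy as np

cornpoints = np.random.rand(20, 10)     # hypothetical: 20 frames x 5 corner points (x, y each)
trajlens = [15, 13, 16, 14, 18]         # hypothetical per-corner trajectory lengths
idx = trajlens.index(min(trajlens))     # index of the shortest trajectory
if trajlens[idx] > 12:
    X = cornpoints[:, 2*idx:2*(idx+1)]  # (N, 2) track of the selected corner point
    X = X - X.mean(axis=0)              # center before extracting the principal axis
    _, _, Vt = np.linalg.svd(X, full_matrices=False)
    print(Vt[0])                        # dominant motion direction (first principal axis)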

View File

@@ -9,7 +9,8 @@ func: extract_data()
 import numpy as np
 import re
 import os
+from collections import OrderedDict
+import matplotlib.pyplot as plt
@@ -206,19 +207,130 @@ def read_deletedBarcode_file(filePth):
     return all_list

+def read_weight_timeConsuming(filePth):
+    WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()
+
+    with open(filePth, 'r', encoding='utf-8') as f:
+        lines = f.readlines()
+    label = ""                      # guards against data lines before any section header
+    for i, line in enumerate(lines):
+        line = line.strip()
+        if line.find(':') < 0: continue
+        if line.find("Weight") >= 0:
+            label = "Weight"
+            continue
+        if line.find("Sensor") >= 0:
+            label = "Sensor"
+            continue
+        if line.find("processTime") >= 0:
+            label = "ProcessTime"
+            continue
+        keyword = line.split(':')[0]
+        value = line.split(':')[1]
+        if label == "Weight":
+            WeightDict[keyword] = float(value.strip(','))
+        if label == "Sensor":
+            SensorDict[keyword] = [float(s) for s in value.split(',') if len(s)]
+        if label == "ProcessTime":
+            ProcessTimeDict[keyword] = float(value.strip(','))
+
+    # print("Done!")
+    return WeightDict, SensorDict, ProcessTimeDict
+
+def plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict):
+    wtime, wdata = [], []
+    stime, sdata = [], []
+    for key, value in WeightDict.items():
+        wtime.append(int(key))
+        wdata.append(value)
+    for key, value in SensorDict.items():
+        if len(value) != 9: continue
+        stime.append(int(key))
+        sdata.append(np.array(value))
+
+    static_range = []
+    dynamic_range = []
+    windth = 8
+    nw = len(wdata)
+    assert nw >= 8, "The num of weight data is less than 8!"
+    i1, i2 = 0, 7
+    while i2 < nw:
+        data = wdata[i1:(i2+1)]
+        max(data) - min(data)       # window range computed but not yet stored; the
+                                    # static/dynamic split above is left unfinished
+        if i2 < 7:
+            i1 = 0
+        else:
+            i1 = i2 - windth
+        i2 += 1                     # advance the window; without this the loop never terminates
+
+    min_t = min(wtime + stime)
+    wtime = [t-min_t for t in wtime]
+    stime = [t-min_t for t in stime]
+    max_t = max(wtime + stime)
+
+    fig = plt.figure(figsize=(16, 12))
+    gs = fig.add_gridspec(2, 1, left=0.1, right=0.9, bottom=0.1, top=0.9,
+                          wspace=0.05, hspace=0.15)
+    # ax1, ax2 = axs
+    ax1 = fig.add_subplot(gs[0, 0])
+    ax2 = fig.add_subplot(gs[1, 0])
+    ax1.plot(wtime, wdata, 'b--', linewidth=2)
+    for i in range(9):
+        ydata = [s[i] for s in sdata]
+        ax2.plot(stime, ydata, linewidth=2)
+    ax1.grid(True), ax1.set_xlim(0, max_t), ax1.set_title('Weight')
+    ax1.set_xlabel("(Time: ms)")
+    # ax1.legend()
+    ax2.grid(True), ax2.set_xlim(0, max_t), ax2.set_title('IMU')
+    # ax2.legend()
+    plt.show()
+
+def main(file_path):
+    WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(file_path)
+    plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict)
+
 if __name__ == "__main__":
-    files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
-    # iterate over every file and directory under the path
+    files_path = r'\\192.168.1.28\share\测试_202406\0814\0814\20240814-102227-62264578-a720-4eb9-b95e-cb8be009aa98_null'
+    k = 0
     for filename in os.listdir(files_path):
-        filename = '1_track.data'
+        filename = 'process.data'
         file_path = os.path.join(files_path, filename)
         if os.path.isfile(file_path) and filename.find("track.data")>0:
             extract_data(file_path)
-    print("Done")
+        if os.path.isfile(file_path) and filename.find("process.data")>=0:
+            main(file_path)
+        k += 1
+        if k == 1:
+            break
+    # print("Done")
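
A hedged sketch of the input format read_weight_timeConsuming() appears to expect: section headers containing "Weight", "Sensor", or "processTime" (each with a colon), followed by key: value lines, where Sensor values are nine comma-separated floats. The sample contents and keys below are invented for illustration:

import tempfile

sample = (
    "Weight:\n"
    "1723600000001: 152.0,\n"
    "1723600000050: 153.5,\n"
    "Sensor:\n"
    "1723600000010: 0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9\n"
    "processTime:\n"
    "yolo: 35.2,\n"
)
with tempfile.NamedTemporaryFile('w', suffix='.data', delete=False, encoding='utf-8') as f:
    f.write(sample)
w, s, p = read_weight_timeConsuming(f.name)
print(w)   # OrderedDict([('1723600000001', 152.0), ('1723600000050', 153.5)])
print(s)   # OrderedDict([('1723600000010', [0.1, ..., 0.9])])
print(p)   # OrderedDict([('yolo', 35.2)])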

View File

@@ -14,38 +14,23 @@ import cv2
 # import sys
 # from scipy.spatial.distance import cdist

-VideoFormat = ['.mp4', '.avi']
+VideoFormat = ['.mp4', '.avi', '.ts']

-def video2imgs(videopath, savepath):
-    k = 0
-    have = False
-    for filename in os.listdir(videopath):
-        file, ext = os.path.splitext(filename)
-        if ext not in VideoFormat:
-            continue
-        basename = os.path.basename(videopath)
-        imgbase = basename + '_' + file
-        imgdir = os.path.join(savepath, imgbase)
-        if not os.path.exists(imgdir):
-            os.mkdir(imgdir)
-        video = os.path.join(videopath, filename)
-        cap = cv2.VideoCapture(video)
-        i = 0
-        while True:
-            ret, frame = cap.read()
-            if not ret:
-                break
-            imgp = os.path.join(imgdir, file+f"_{i}.png")
-            i += 1
-            cv2.imwrite(imgp, frame)
-        cap.release()
-        print(filename + f" haved resolved")
-        k+=1
-        if k==1000:
-            break
+def video2imgs(videof, imgdir):
+    cap = cv2.VideoCapture(videof)
+    i = 0
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        imgp = os.path.join(imgdir, f"{i}.png")
+        i += 1
+        cv2.imwrite(imgp, frame)
+        if i == 400:    # cap the dump at 400 frames per video
+            break
+    cap.release()
+    print(os.path.basename(videof) + " has been processed")

 def videosave(bboxes, videopath="100_1688009697927.mp4"):
@@ -95,10 +80,30 @@ def videosave(bboxes, videopath="100_1688009697927.mp4"):
     cap.release()

 def main():
-    videopath = r'C:\Users\ym\Desktop'
-    savepath = r'C:\Users\ym\Desktop'
-    video2imgs(videopath, savepath)
+    videopath = r'\\192.168.1.28\share\测试_202406\0822\A_1724314806144'
+    savepath = r'D:\badvideo'
+    # video2imgs(videopath, savepath)
+    k = 0
+    for filename in os.listdir(videopath):
+        filename = "20240822-163506_88e6409d-f19b-4e97-9f01-b3fde259cbff.ts"
+        file, ext = os.path.splitext(filename)
+        if ext not in VideoFormat:
+            continue
+        basename = os.path.basename(videopath)
+        imgbase = basename + '-&-' + file
+        imgdir = os.path.join(savepath, imgbase)
+        if not os.path.exists(imgdir):
+            os.mkdir(imgdir)
+        videof = os.path.join(videopath, filename)
+        video2imgs(videof, imgdir)
+        k += 1
+        if k == 1:
+            break

 if __name__ == '__main__':
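
A minimal usage sketch for the new video2imgs(videof, imgdir) signature; the paths below are placeholders, not the share paths from the diff. The 400-frame cap inside the function keeps the dump bounded for long .ts recordings:

import os

videof = r"D:\badvideo\sample.ts"        # hypothetical input video
imgdir = r"D:\badvideo\sample_frames"    # hypothetical output folder
os.makedirs(imgdir, exist_ok=True)
video2imgs(videof, imgdir)               # writes 0.png, 1.png, ... (at most 400 frames)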

35
tracking/说明文档.txt Normal file
View File

@@ -0,0 +1,35 @@
tracking_test.py
    have_tracked():
        Trajectory-analysis test. Iterates over every .pkl file in the trackdict folder written by track_reid.py.

time_test.py
    Measures the time consumed by each module of the overall pipeline.

module_analysis.py
    main():
        Walks every subfolder under a given folder and runs tracking_simulate() on each;
    main_loop():
        (1) builds event pairs from deletedBarcode.txt and derives the save paths from those pairs;
        (2) calls tracking_simulate().
    tracking_simulate(eventpath, savepath):
        (1) derives the event name event_name from event_names;
        (2) iterates over the 0_track.data and 1_track.data files under the eventpath folder and processes each via do_tracking();
        (3) merges the 8 sub-images (front/rear camera x local/on-site algorithm) into one composite image.
    do_tracking(fpath, savedir, event_name='images')

enentmatch.py
    1:n simulation test; deprecated!

contrast_analysis.py
    1:n on-site test evaluation.
    main():
        Reads the deletedBarcode.txt files from several folders in a loop and evaluates them jointly.
    main1():
        Runs a 1:n performance evaluation on a single specified deletedBarcode.txt.

feat_select.py
    Compares matching performance under the following two feature-selection strategies (a minimal sketch follows):
    (1) the on-site algorithm combines front- and rear-camera features;
    (2) the local algorithm prefers front-camera features.
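
A minimal sketch of those two strategies, assuming each camera yields an (n, d) array of ReID features; the function names and dimensions here are invented for illustration, not feat_select.py's actual API:

import numpy as np

def combine_front_rear(front, rear):
    """Strategy (1): pool front- and rear-camera features into one gallery."""
    return np.vstack([front, rear])

def prefer_front(front, rear):
    """Strategy (2): use front-camera features when present, else fall back to rear."""
    return front if len(front) else rear

def best_cosine(query, gallery):
    # best cosine similarity between any query feature and any gallery feature
    q = query / np.linalg.norm(query, axis=1, keepdims=True)
    g = gallery / np.linalg.norm(gallery, axis=1, keepdims=True)
    return float((q @ g.T).max())

front, rear = np.random.rand(3, 256), np.random.rand(5, 256)
query = np.random.rand(2, 256)
print(best_cosine(query, combine_front_rear(front, rear)))
print(best_cosine(query, prefer_front(front, rear)))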