更新 detecttracking
This commit is contained in:
7
detecttracking/contrast/utils/__init__.py
Normal file
7
detecttracking/contrast/utils/__init__.py
Normal file
@ -0,0 +1,7 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Thu Sep 26 08:53:58 2024
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
|
83
detecttracking/contrast/utils/barcode_set_operate.py
Normal file
83
detecttracking/contrast/utils/barcode_set_operate.py
Normal file
@ -0,0 +1,83 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Fri Sep 13 16:49:05 2024
|
||||
|
||||
比较 stdBcdpath 和 filepath 中的 barcodes 列表,求出二者的并集和为包含在
|
||||
stdBcdpath 中的 barcodes 清单
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
from openpyxl import load_workbook, Workbook
|
||||
|
||||
def read_xlsx():
    """Compare the store spreadsheet barcodes with the standard barcode library.

    Reads column 1 (barcodes) of the 中环店 product sheet, compares it against
    the directory names under the standard barcode library share, and writes
    two spreadsheets: rows for products already covered by the library and
    rows for products the library lacks.  All paths are hard-coded network
    shares; the function returns nothing.
    """
    stdBcdpath = r"\\192.168.1.28\share\已标注数据备份\对比数据\barcode\total_barcode_6588"
    filepath = r"\\192.168.1.28\share\联华中环店\中环店商品信息.xlsx"

    existingPath = r'\\192.168.1.28\share\联华中环店\中环店商品信息_已有商品.xlsx'
    lackingPath = r'\\192.168.1.28\share\联华中环店\中环店商品信息_未包含商品.xlsx'

    workbook = load_workbook(filename=filepath)
    sheet = workbook['Sheet1']
    barcodeCol = [sheet.cell(row=r, column=1).value for r in range(1, sheet.max_row + 1)]

    # Skip the header cell; the remainder of column 1 are the store barcodes.
    # Using a set gives O(1) membership tests in the row-copy loop below
    # (the original kept a list, making the loop quadratic).
    zhBarcodeSet = set(barcodeCol[1:])

    # The standard library keeps one directory per barcode; the directory
    # name is the all-digit barcode itself.
    stdBarcodeSet = set()
    for filename in os.listdir(stdBcdpath):
        # NOTE: a separate name is used here; the original reassigned
        # `filepath`, shadowing the spreadsheet path.
        dirpath = os.path.join(stdBcdpath, filename)
        if not os.path.isdir(dirpath) or not filename.isdigit():
            continue
        stdBarcodeSet.add(int(filename))

    interBarcodes = zhBarcodeSet.intersection(stdBarcodeSet)
    print(len(interBarcodes))

    def write_rows(barcodes, destPath):
        # Copy the header row plus every row whose barcode is in `barcodes`.
        dest_wb = Workbook()
        dest_sheet = dest_wb.active
        for row in sheet.iter_rows(min_row=1, max_col=sheet.max_column, values_only=True):
            if str(row[0]).find("商品条码") >= 0:
                dest_sheet.append(row)
            if row[0] in barcodes:
                dest_sheet.append(row)
        dest_wb.save(filename=destPath)
        dest_wb.close()

    # Products the standard library already covers.
    write_rows(interBarcodes, existingPath)
    # Products missing from the standard library.
    write_rows(zhBarcodeSet.difference(stdBarcodeSet), lackingPath)

    workbook.close()
||||
|
||||
if __name__ == '__main__':
    # Entry point: diff the store sheet against the standard barcode library.
    read_xlsx()
155
detecttracking/contrast/utils/dotest.py
Normal file
155
detecttracking/contrast/utils/dotest.py
Normal file
@ -0,0 +1,155 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Dec 10 14:30:16 2024
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import sys
|
||||
import numpy as np
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.read_data import read_tracking_output, read_similar #, extract_data, read_deletedBarcode_file
|
||||
|
||||
|
||||
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
|
||||
|
||||
|
||||
def creat_shopping_event(eventPath):
    """Build a shopping-event dict from an event directory.

    An event qualifies only when:
      1) at least one tracking output exists for both the back and front camera;
      2) frame images are saved, so box sub-images can be cropped later.

    Returns the event dict, or None when the directory is invalid or the
    required data/images are missing.  (Early checks use a bare `return`,
    which also yields None.)
    """

    '''evtName 为一次购物事件'''
    # Event folder name encodes the event; trailing '_'-separated token is
    # usually the barcode.
    evtName = os.path.basename(eventPath)
    evtList = evtName.split('_')

    '''================ 0. 检查 evtName 及 eventPath 正确性和有效性 ================'''
    # NOTE(review): `and` means a name is accepted when EITHER it contains
    # '2024' OR its first token is 15 chars — possibly `or` was intended;
    # confirm against the naming convention.
    if evtName.find('2024')<0 and len(evtList[0])!=15:
        return
    if not os.path.isdir(eventPath):
        return

    # Barcode is the last token unless the name has no (or an empty) suffix.
    if len(evtList)==1 or (len(evtList)==2 and len(evtList[1])==0):
        barcode = ''
    else:
        barcode = evtList[-1]

    # A duplicated trailing token marks a put-in ("input") event.
    if len(evtList)==3 and evtList[-1]== evtList[-2]:
        evtType = 'input'
    else:
        evtType = 'other'

    '''================ 1. 构造事件描述字典,暂定 9 items ==============='''
    # Feature arrays are (N, 256) float64; box arrays are (N, 9):
    # [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
    # (layout per the sibling event.py module — TODO confirm).
    event = {}
    event['barcode'] = barcode
    event['type'] = evtType
    event['filepath'] = eventPath
    event['back_imgpaths'] = []
    event['front_imgpaths'] = []
    event['back_boxes'] = np.empty((0, 9), dtype=np.float64)
    event['front_boxes'] = np.empty((0, 9), dtype=np.float64)
    event['back_feats'] = np.empty((0, 256), dtype=np.float64)
    event['front_feats'] = np.empty((0, 256), dtype=np.float64)
    event['feats_compose'] = np.empty((0, 256), dtype=np.float64)
    event['one2one'] = None
    event['one2n'] = None
    event['feats_select'] = np.empty((0, 256), dtype=np.float64)

    '''================= 2. 读取 data 文件 ============================='''
    for dataname in os.listdir(eventPath):
        # filename = '1_track.data'
        datapath = os.path.join(eventPath, dataname)
        if not os.path.isfile(datapath): continue

        # Leading token encodes the camera: '0' = back, '1' = front.
        CamerType = dataname.split('_')[0]
        ''' 2.1 读取 0/1_track.data 中数据,暂不考虑'''
        # if dataname.find("_track.data")>0:
        #     bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(datapath)

        ''' 2.2 读取 0/1_tracking_output.data 中数据'''
        if dataname.find("_tracking_output.data")>0:
            tracking_output_boxes, tracking_output_feats = read_tracking_output(datapath)
            # Skip inconsistent files: boxes and features must pair up 1:1.
            if len(tracking_output_boxes) != len(tracking_output_feats): continue
            if CamerType == '0':
                event['back_boxes'] = tracking_output_boxes
                event['back_feats'] = tracking_output_feats
            elif CamerType == '1':
                event['front_boxes'] = tracking_output_boxes
                event['front_feats'] = tracking_output_feats

        # process.data carries the 1:1 and 1:n similarity results.
        if dataname.find("process.data")==0:
            simiDict = read_similar(datapath)
            event['one2one'] = simiDict['one2one']
            event['one2n'] = simiDict['one2n']

    # Require at least one track from each camera (condition 1 above).
    if len(event['back_boxes'])==0 or len(event['front_boxes'])==0:
        return None

    '''2.3 事件的特征表征方式: 特征选择、特征集成'''
    bk_feats = event['back_feats']
    ft_feats = event['front_feats']

    '''2.3.1 特征集成'''
    # Stack front-camera features first, then back-camera features.
    feats_compose = np.empty((0, 256), dtype=np.float64)
    if len(ft_feats):
        feats_compose = np.concatenate((feats_compose, ft_feats), axis=0)
    if len(bk_feats):
        feats_compose = np.concatenate((feats_compose, bk_feats), axis=0)
    event['feats_compose'] = feats_compose

    '''2.3.1 特征选择'''
    # Selection policy: prefer the front camera; otherwise feats_select
    # stays empty (back features are NOT used as a fallback here).
    if len(ft_feats):
        event['feats_select'] = ft_feats

    '''================ 3. 读取图像文件地址,并按照帧ID排序 ============='''
    frontImgs, frontFid = [], []
    backImgs, backFid = [], []
    for imgname in os.listdir(eventPath):
        name, ext = os.path.splitext(imgname)
        if ext not in IMG_FORMAT or name.find('frameId')<0: continue

        # Image names look like <cam>_<...>_<...>_<frameId> — assumed from
        # the indices used here; TODO confirm against the capture naming.
        CamerType = name.split('_')[0]
        frameId = int(name.split('_')[3])
        imgpath = os.path.join(eventPath, imgname)
        if CamerType == '0':
            backImgs.append(imgpath)
            backFid.append(frameId)
        if CamerType == '1':
            frontImgs.append(imgpath)
            frontFid.append(frameId)

    frontIdx = np.argsort(np.array(frontFid))
    backIdx = np.argsort(np.array(backFid))

    '''3.1 生成依据帧 ID 排序的前后摄图像地址列表'''
    frontImgs = [frontImgs[i] for i in frontIdx]
    backImgs = [backImgs[i] for i in backIdx]

    '''3.2 将前、后摄图像路径添加至事件字典'''
    # Map each box's frame index (column 7, 1-based) to its image path.
    # Lists are only filled when every referenced frame has an image.
    bfid = event['back_boxes'][:, 7].astype(np.int64)
    ffid = event['front_boxes'][:, 7].astype(np.int64)
    if len(bfid) and max(bfid) <= len(backImgs):
        event['back_imgpaths'] = [backImgs[i-1] for i in bfid]
    if len(ffid) and max(ffid) <= len(frontImgs):
        event['front_imgpaths'] = [frontImgs[i-1] for i in ffid]

    '''================ 4. 判断当前事件有效性,并添加至事件列表 =========='''
    # condt1: missing image paths for either camera (condition 2 above);
    # condt2: no features at all from either camera.
    condt1 = len(event['back_imgpaths'])==0 or len(event['front_imgpaths'])==0
    condt2 = len(event['front_feats'])==0 and len(event['back_feats'])==0

    if condt1 or condt2:
        print(f"Event: {evtName}, Error, condt1: {condt1}, condt2: {condt2}")
        return None

    return event
533
detecttracking/contrast/utils/event.py
Normal file
533
detecttracking/contrast/utils/event.py
Normal file
@ -0,0 +1,533 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Tue Nov 26 17:35:05 2024
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import os
|
||||
import cv2
|
||||
import pickle
|
||||
import numpy as np
|
||||
from pathlib import Path
|
||||
|
||||
import sys
|
||||
sys.path.append(r"D:\DetectTracking")
|
||||
from tracking.utils.plotting import Annotator, colors
|
||||
from tracking.utils.drawtracks import drawTrack
|
||||
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
|
||||
from tracking.utils.read_data import extract_data_realtime, read_tracking_output_realtime
|
||||
|
||||
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
|
||||
VID_FORMAT = ['.mp4', '.avi']
|
||||
|
||||
def save_data(event, resultPath=None):
    """Persist an event's visual artifacts under resultPath.

    event: a ShoppingEvent-like object exposing evtname, save_event_subimg(),
    plot_save_image() and draw_tracks().
    resultPath: output root; defaults to this module's directory.

    Writes three sets of files:
      (2) per-track cropped sub-images into  <resultPath>/<evtname>/subimg/
      (3) annotated frame images into        <resultPath>/<evtname>/imgs/
      (4) a trajectory panorama into         <resultPath>/trajectory/<evtname>.png
    """
    if resultPath is None:
        resultPath = os.path.dirname(os.path.abspath(__file__))

    subimgpath = os.path.join(resultPath, f"{event.evtname}", "subimg")
    imgspath = os.path.join(resultPath, f"{event.evtname}", "imgs")
    if not os.path.exists(subimgpath):
        os.makedirs(subimgpath)
    if not os.path.exists(imgspath):
        os.makedirs(imgspath)
    ## (2) save the cropped sub-images of each track
    subimgpairs = event.save_event_subimg(subimgpath)
    for subimgName, subimg in subimgpairs:
        spath = os.path.join(subimgpath, subimgName)
        cv2.imwrite(spath, subimg)

    ## (3) save the annotated frame sequence
    imgpairs = event.plot_save_image(imgspath)
    for imgname, img in imgpairs:
        spath = os.path.join(imgspath, imgname)
        cv2.imwrite(spath, img)
    ## (4) save the trajectory scatter/overview image
    img_cat = event.draw_tracks()
    trajpath = os.path.join(resultPath, "trajectory")
    if not os.path.exists(trajpath):
        os.makedirs(trajpath)
    traj_imgpath = os.path.join(trajpath, event.evtname+".png")
    cv2.imwrite(traj_imgpath, img_cat)
|
||||
def array2list(bboxes):
    """Split a box array into per-track sub-arrays.

    bboxes: ndarray whose rows are
        [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
    Returns a list with one sub-array per distinct track_id (ids in
    ascending order); each sub-array keeps the full 9-column row layout.
    Returns [] for empty input.
    """
    if len(bboxes) == 0:
        return []
    ids = bboxes[:, 4].astype(int)
    return [bboxes[ids == tid, :] for tid in np.unique(ids)]
|
||||
class ShoppingEvent:
    """One shopping event (put-in / take-out) reconstructed from saved data.

    Holds, for both the back ('0') and front ('1') cameras: image and video
    paths, per-stage detection/tracking boxes and features, the similarity
    results from process.data, and two derived feature representations
    (feats_select, feats_compose).
    """

    def __init__(self, eventpath, stype="data"):
        '''stype: str, one of 'source', 'data', 'realtime' — selects which
        loader populates the event (pickled ShoppingDict, event directory
        with .data files, or realtime-format .data files respectively).'''

        self.eventpath = eventpath
        self.evtname = str(Path(eventpath).stem)
        self.barcode = ''
        self.evtType = ''

        '''=========== path of image and video =========== '''
        self.back_videopath = ''
        self.front_videopath = ''
        self.back_imgpaths = []
        self.front_imgpaths = []

        '''=========== process.data ==============================='''
        # Similarity results; filled by the loaders when process.data exists.
        self.one2one = None
        self.one2n = None
        self.one2SN = None

        '''=========== 0/1_track.data ============================='''
        self.back_yolobboxes = []
        self.back_yolofeats = []
        self.back_trackerboxes = np.empty((0, 9), dtype=np.float64)  ## compatible with class doTracks
        self.back_trackerfeats = {}
        self.back_trackingboxes = []
        self.back_trackingfeats = []

        self.front_yolobboxes = []
        self.front_yolofeats = []
        self.front_trackerboxes = np.empty((0, 9), dtype=np.float64)  ## compatible with class doTracks
        self.front_trackerfeats = {}
        self.front_trackingboxes = []
        self.front_trackingfeats = []

        '''=========== 0/1_tracking_output.data ==================='''
        self.back_boxes = []
        self.back_feats = []
        self.front_boxes = []
        self.front_feats = []

        # Dispatch to the loader matching stype; an unknown stype leaves
        # the event with the empty defaults above.
        if stype=="data":
            self.from_datafile(eventpath)
        if stype=="realtime":
            self.from_realtime_datafile(eventpath)
        if stype=="source":
            self.from_source_pkl(eventpath)

        # Derived representations, recomputed after loading.
        self.feats_select = np.empty((0, 256), dtype=np.float64)
        self.feats_compose = np.empty((0, 256), dtype=np.float64)
        self.select_feats()
        self.compose_feats()

        # if stype=="image":
        #     self.from_image(eventpath)
||||
    def kerndata(self, ShoppingDict, camtype="backCamera"):
        '''
        Extract one camera's tracking data from a pickled ShoppingDict.

        camtype: str, "backCamera" or "frontCamera"
        Returns (kdata, outdata) where
          kdata   = (yoloboxes, resfeats, trackerboxes, trackefeats,
                     trackingboxes, trackingfeats)   # per-stage raw data
          outdata = (tracking_out_boxes, tracking_out_feats)  # confirmed tracks
        NOTE(review): resfeats is never filled here — it is returned empty.
        '''
        yoloboxes, resfeats = [], []
        trackerboxes = np.empty((0, 9), dtype=np.float64)
        trackefeats = {}
        trackingboxes, trackingfeats = [], []

        # Per-frame detection + tracker output.
        frameDictList = ShoppingDict[camtype]["yoloResnetTracker"]
        for frameDict in frameDictList:
            yoloboxes.append(frameDict["bboxes"])

            tboxes = frameDict["tboxes"]
            trackefeats.update(frameDict["feats"])

            trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)), axis=0)

        # Residual: tracks left after the tracking post-processing.
        Residual = ShoppingDict[camtype]["tracking"].Residual
        for track in Residual:
            trackingboxes.append(track.boxes)
            trackingfeats.append(track.features)
        kdata = (yoloboxes, resfeats, trackerboxes, trackefeats, trackingboxes, trackingfeats)

        # Confirmed: tracks accepted as the final tracking output.
        tracking_out_boxes, tracking_out_feats = [], []
        Confirmed = ShoppingDict[camtype]["tracking"].Confirmed
        for track in Confirmed:
            tracking_out_boxes.append(track.boxes)
            tracking_out_feats.append(track.features)
        outdata = (tracking_out_boxes, tracking_out_feats)

        return kdata, outdata
|
||||
def from_source_pkl(self, eventpath):
|
||||
with open(eventpath, 'rb') as f:
|
||||
ShoppingDict = pickle.load(f)
|
||||
|
||||
self.eventpath = ShoppingDict["eventPath"]
|
||||
self.evtname = ShoppingDict["eventName"]
|
||||
self.barcode = ShoppingDict["barcode"]
|
||||
|
||||
if len(ShoppingDict["one2n"]):
|
||||
self.one2n = ShoppingDict["one2n"]
|
||||
|
||||
'''=========== path of image and video =========== '''
|
||||
self.back_videopath = ShoppingDict["backCamera"]["videoPath"]
|
||||
self.front_videopath = ShoppingDict["frontCamera"]["videoPath"]
|
||||
self.back_imgpaths = ShoppingDict["backCamera"]["imagePaths"]
|
||||
self.front_imgpaths = ShoppingDict["frontCamera"]["imagePaths"]
|
||||
|
||||
|
||||
'''===========对应于 0/1_track.data ============================='''
|
||||
backdata, back_outdata = self.kerndata(ShoppingDict, "backCamera")
|
||||
frontdata, front_outdata = self.kerndata(ShoppingDict, "frontCamera")
|
||||
self.back_yolobboxes = backdata[0]
|
||||
self.back_yolofeats = backdata[1]
|
||||
self.back_trackerboxes = backdata[2]
|
||||
self.back_trackerfeats = [3]
|
||||
self.back_trackingboxes = [4]
|
||||
self.back_trackingfeats = [5]
|
||||
|
||||
self.front_yolobboxes = frontdata[0]
|
||||
self.front_yolofeats = frontdata[1]
|
||||
self.front_trackerboxes = frontdata[2]
|
||||
self.front_trackerfeats = frontdata[3]
|
||||
self.front_trackingboxes = frontdata[4]
|
||||
self.front_trackingfeats = frontdata[5]
|
||||
|
||||
'''===========对应于 0/1_tracking_output.data ============================='''
|
||||
self.back_boxes = back_outdata[0]
|
||||
self.back_feats = back_outdata[1]
|
||||
self.front_boxes = front_outdata[0]
|
||||
self.front_feats = front_outdata[1]
|
||||
|
||||
|
||||
def from_datafile(self, eventpath):
|
||||
evtList = self.evtname.split('_')
|
||||
if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
|
||||
self.barcode = evtList[-1]
|
||||
if len(evtList)==3 and evtList[-1]== evtList[-2]:
|
||||
self.evtType = 'input'
|
||||
else:
|
||||
self.evtType = 'other'
|
||||
|
||||
'''================ path of image ============='''
|
||||
frontImgs, frontFid = [], []
|
||||
backImgs, backFid = [], []
|
||||
for imgname in os.listdir(eventpath):
|
||||
name, ext = os.path.splitext(imgname)
|
||||
if ext not in IMG_FORMAT or name.find('frameId') < 0: continue
|
||||
if len(name.split('_')) != 3 and not name.split('_')[3].isdigit(): continue
|
||||
|
||||
CamerType = name.split('_')[0]
|
||||
frameId = int(name.split('_')[3])
|
||||
imgpath = os.path.join(eventpath, imgname)
|
||||
if CamerType == '0':
|
||||
backImgs.append(imgpath)
|
||||
backFid.append(frameId)
|
||||
if CamerType == '1':
|
||||
frontImgs.append(imgpath)
|
||||
frontFid.append(frameId)
|
||||
## 生成依据帧 ID 排序的前后摄图像地址列表
|
||||
frontIdx = np.argsort(np.array(frontFid))
|
||||
backIdx = np.argsort(np.array(backFid))
|
||||
self.front_imgpaths = [frontImgs[i] for i in frontIdx]
|
||||
self.back_imgpaths = [backImgs[i] for i in backIdx]
|
||||
|
||||
|
||||
'''================ path of video ============='''
|
||||
for vidname in os.listdir(eventpath):
|
||||
name, ext = os.path.splitext(vidname)
|
||||
if ext not in VID_FORMAT: continue
|
||||
vidpath = os.path.join(eventpath, vidname)
|
||||
|
||||
CamerType = name.split('_')[0]
|
||||
if CamerType == '0':
|
||||
self.back_videopath = vidpath
|
||||
if CamerType == '1':
|
||||
self.front_videopath = vidpath
|
||||
|
||||
'''================ process.data ============='''
|
||||
procpath = Path(eventpath).joinpath('process.data')
|
||||
if procpath.is_file():
|
||||
SimiDict = read_similar(procpath)
|
||||
self.one2one = SimiDict['one2one']
|
||||
self.one2n = SimiDict['one2n']
|
||||
self.one2SN = SimiDict['one2SN']
|
||||
|
||||
'''=========== 0/1_track.data & 0/1_tracking_output.data ======='''
|
||||
for dataname in os.listdir(eventpath):
|
||||
datapath = os.path.join(eventpath, dataname)
|
||||
if not os.path.isfile(datapath): continue
|
||||
CamerType = dataname.split('_')[0]
|
||||
|
||||
'''========== 0/1_track.data =========='''
|
||||
if dataname.find("_track.data")>0:
|
||||
bboxes, ffeats, trackerboxes, trackerfeats, trackingboxes, trackingfeats = extract_data(datapath)
|
||||
if CamerType == '0':
|
||||
self.back_yolobboxes = bboxes
|
||||
self.back_yolofeats = ffeats
|
||||
self.back_trackerboxes = trackerboxes
|
||||
self.back_trackerfeats = trackerfeats
|
||||
self.back_trackingboxes = trackingboxes
|
||||
self.back_trackingfeats = trackingfeats
|
||||
if CamerType == '1':
|
||||
self.front_yolobboxes = bboxes
|
||||
self.front_yolofeats = ffeats
|
||||
self.front_trackerboxes = trackerboxes
|
||||
self.front_trackerfeats = trackerfeats
|
||||
self.front_trackingboxes = trackingboxes
|
||||
self.front_trackingfeats = trackingfeats
|
||||
|
||||
'''========== 0/1_tracking_output.data =========='''
|
||||
if dataname.find("_tracking_output.data")>0:
|
||||
tracking_output_boxes, tracking_output_feats = read_tracking_output(datapath)
|
||||
if CamerType == '0':
|
||||
self.back_boxes = tracking_output_boxes
|
||||
self.back_feats = tracking_output_feats
|
||||
elif CamerType == '1':
|
||||
self.front_boxes = tracking_output_boxes
|
||||
self.front_feats = tracking_output_feats
|
||||
|
||||
    def from_realtime_datafile(self, eventpath):
        """Populate the event from a realtime-format event directory.

        Like from_datafile, but uses the realtime readers
        (extract_data_realtime / read_tracking_output_realtime) and collects
        only video paths, process.data and the 0/1_*.data files — no frame
        images, and no barcode/event-type parsing (that block is disabled
        below).
        """
        # evtList = self.evtname.split('_')
        # if len(evtList)>=2 and len(evtList[-1])>=10 and evtList[-1].isdigit():
        #     self.barcode = evtList[-1]
        # if len(evtList)==3 and evtList[-1]== evtList[-2]:
        #     self.evtType = 'input'
        # else:
        #     self.evtType = 'other'

        '''================ path of video ============='''
        for vidname in os.listdir(eventpath):
            name, ext = os.path.splitext(vidname)
            if ext not in VID_FORMAT: continue
            vidpath = os.path.join(eventpath, vidname)

            # Leading token encodes the camera: '0' = back, '1' = front.
            CamerType = name.split('_')[0]
            if CamerType == '0':
                self.back_videopath = vidpath
            if CamerType == '1':
                self.front_videopath = vidpath

        '''================ process.data ============='''
        procpath = Path(eventpath).joinpath('process.data')
        if procpath.is_file():
            SimiDict = read_similar(procpath)
            self.one2one = SimiDict['one2one']
            self.one2n = SimiDict['one2n']
            self.one2SN = SimiDict['one2SN']

        '''=========== 0/1_track.data & 0/1_tracking_output.data ======='''
        for dataname in os.listdir(eventpath):
            datapath = os.path.join(eventpath, dataname)
            if not os.path.isfile(datapath): continue
            CamerType = dataname.split('_')[0]
            '''========== 0/1_track.data =========='''
            if dataname.find("_track.data")>0:
                trackerboxes, trackerfeats = extract_data_realtime(datapath)
                if CamerType == '0':
                    self.back_trackerboxes = trackerboxes
                    self.back_trackerfeats = trackerfeats

                if CamerType == '1':
                    self.front_trackerboxes = trackerboxes
                    self.front_trackerfeats = trackerfeats
            '''========== 0/1_tracking_output.data =========='''
            if dataname.find("_tracking_output.data")>0:
                # Realtime reader yields both the tracking stage output and
                # the final confirmed output in one call.
                trackingboxes, trackingfeats, tracking_outboxes, tracking_outfeats = read_tracking_output_realtime(datapath)
                if CamerType == '0':
                    self.back_trackingboxes = trackingboxes
                    self.back_trackingfeats = trackingfeats
                    self.back_boxes = tracking_outboxes
                    self.back_feats = tracking_outfeats
                elif CamerType == '1':
                    self.front_trackingboxes = trackingboxes
                    self.front_trackingfeats = trackingfeats
                    self.front_boxes = tracking_outboxes
                    self.front_feats = tracking_outfeats
|
||||
def compose_feats(self):
|
||||
'''事件的特征集成'''
|
||||
feats_compose = np.empty((0, 256), dtype=np.float64)
|
||||
if len(self.front_feats):
|
||||
for feat in self.front_feats:
|
||||
feats_compose = np.concatenate((feats_compose, feat), axis=0)
|
||||
if len(self.back_feats):
|
||||
for feat in self.back_feats:
|
||||
feats_compose = np.concatenate((feats_compose, feat), axis=0)
|
||||
self.feats_compose = feats_compose
|
||||
|
||||
def select_feats(self):
|
||||
'''事件的特征选择'''
|
||||
if len(self.front_feats):
|
||||
self.feats_select = self.front_feats[0]
|
||||
elif len(self.back_feats):
|
||||
self.feats_select = self.back_feats[0]
|
||||
|
||||
    def plot_save_image(self, savepath):
        """Draw tracker boxes onto each referenced frame image.

        Returns a list of (image_name, annotated_image) pairs for both
        cameras.  NOTE(review): `savepath` is currently unused — the actual
        writes are commented out at the bottom; the caller (save_data)
        performs the writing.
        """

        def array2list(bboxes):
            '''Group rows [x1, y1, x2, y2, track_id, score, cls, frame_index,
            box_index] by frame index; returns [(frame_id, rows), ...].'''
            frame_ids = bboxes[:, 7].astype(int)
            fID = np.unique(bboxes[:, 7].astype(int))
            fboxes = []
            for f_id in fID:
                idx = np.where(frame_ids==f_id)[0]
                box = bboxes[idx, :]
                fboxes.append((f_id, box))
            return fboxes

        imgpairs = []
        cameras = ('front', 'back')
        for camera in cameras:
            if camera == 'front':
                boxes = self.front_trackerboxes
                imgpaths = self.front_imgpaths
            else:
                boxes = self.back_trackerboxes
                imgpaths = self.back_imgpaths

            fboxes = array2list(boxes)
            for fid, fbox in fboxes:
                # Frame ids are 1-based indices into the sorted path list.
                imgpath = imgpaths[int(fid-1)]

                image = cv2.imread(imgpath)

                annotator = Annotator(image.copy(), line_width=2)
                for i, box in enumerate(fbox):
                    x1, y1, x2, y2, tid, score, cls, fid, bid = box
                    label = f'{int(tid), int(cls)}'
                    # Color policy: hands (cls 0) by class, goods by track id,
                    # unassigned (tid < 0) get the last palette entry.
                    if tid >=0 and cls==0:
                        color = colors(int(cls), True)
                    elif tid >=0 and cls!=0:
                        color = colors(int(tid), True)
                    else:
                        color = colors(19, True)  # 19 is the last palette entry
                    # Boxes are halved — the saved frames are half-resolution
                    # relative to the tracking coordinates (presumably; see
                    # the same /2 scaling in save_event_subimg).
                    xyxy = (x1/2, y1/2, x2/2, y2/2)
                    annotator.box_label(xyxy, label, color=color)

                im0 = annotator.result()

                imgpairs.append((Path(imgpath).name, im0))

                # spath = os.path.join(savepath, Path(imgpath).name)

                # cv2.imwrite(spath, im0)
        return imgpairs
||||
def save_event_subimg(self, savepath):
|
||||
'''
|
||||
功能: 保存一次购物事件的轨迹子图
|
||||
9 items: barcode, type, filepath, back_imgpaths, front_imgpaths,
|
||||
back_boxes, front_boxes, back_feats, front_feats,
|
||||
feats_compose, feats_select
|
||||
子图保存次序:先前摄、后后摄,以 k 为编号,和 "feats_compose" 中次序相同
|
||||
'''
|
||||
imgpairs = []
|
||||
cameras = ('front', 'back')
|
||||
for camera in cameras:
|
||||
boxes = np.empty((0, 9), dtype=np.float64) ##和类doTracks兼容
|
||||
if camera == 'front':
|
||||
for b in self.front_boxes:
|
||||
boxes = np.concatenate((boxes, b), axis=0)
|
||||
imgpaths = self.front_imgpaths
|
||||
else:
|
||||
for b in self.back_boxes:
|
||||
boxes = np.concatenate((boxes, b), axis=0)
|
||||
imgpaths = self.back_imgpaths
|
||||
|
||||
for i, box in enumerate(boxes):
|
||||
x1, y1, x2, y2, tid, score, cls, fid, bid = box
|
||||
|
||||
imgpath = imgpaths[int(fid-1)]
|
||||
image = cv2.imread(imgpath)
|
||||
|
||||
subimg = image[int(y1/2):int(y2/2), int(x1/2):int(x2/2), :]
|
||||
|
||||
camerType, timeTamp, _, frameID = os.path.basename(imgpath).split('.')[0].split('_')
|
||||
subimgName = f"cam{camerType}_{i}_tid{int(tid)}_fid({int(fid)}, {frameID}).png"
|
||||
|
||||
imgpairs.append((subimgName, subimg))
|
||||
|
||||
# spath = os.path.join(savepath, subimgName)
|
||||
|
||||
# cv2.imwrite(spath, subimg)
|
||||
return imgpairs
|
||||
# basename = os.path.basename(event['filepath'])
|
||||
print(f"Image saved: {os.path.basename(self.eventpath)}")
|
||||
|
||||
    def draw_tracks(self):
        """Render tracker vs. tracking trajectories for both cameras.

        Builds a side-by-side panorama: [front tracker | front tracking]
        next to [back tracker | back tracking], separated by colored lines.
        Background templates are read from hard-coded local paths.
        Returns the composed BGR image.
        """
        front_edge = cv2.imread(r"D:\DetectTracking\tracking\shopcart\cart_tempt\board_ftmp_line.png")
        back_edge = cv2.imread(r"D:\DetectTracking\tracking\shopcart\cart_tempt\edgeline.png")

        # Tracker boxes arrive as one flat array; split them per track id.
        # (Tracking boxes are already per-track lists.)
        front_trackerboxes = array2list(self.front_trackerboxes)
        back_trackerboxes = array2list(self.back_trackerboxes)

        # img1, img2 = edgeline.copy(), edgeline.copy()
        img1 = drawTrack(front_trackerboxes, front_edge.copy())
        img2 = drawTrack(self.front_trackingboxes, front_edge.copy())

        img3 = drawTrack(back_trackerboxes, back_edge.copy())
        img4 = drawTrack(self.back_trackingboxes, back_edge.copy())

        # Front pair, with a green divider in the middle.
        imgcat1 = np.concatenate((img1, img2), axis = 1)
        H, W = imgcat1.shape[:2]
        cv2.line(imgcat1, (int(W/2), 0), (int(W/2), H), (128, 255, 128), 2)

        # Back pair, same divider.
        imgcat2 = np.concatenate((img3, img4), axis = 1)
        H, W = imgcat2.shape[:2]
        cv2.line(imgcat2, (int(W/2), 0), (int(W/2), H), (128, 255, 128), 2)

        # Concatenate both pairs and draw a red divider between cameras.
        illus = [imgcat1, imgcat2]
        if len(illus):
            img_cat = np.concatenate(illus, axis = 1)
            if len(illus)==2:
                H, W = img_cat.shape[:2]
                cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3)

        return img_cat
||||
def main():
    """Load one event directory and write its trajectory panorama to a.png."""
    # pklpath = r"D:\DetectTracking\evtresult\images2\ShoppingDict.pkl"
    # evt = ShoppingEvent(pklpath, stype='source')

    evtpath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images\20241209-160248-08edd5f6-1806-45ad-babf-7a4dd11cea60_6973226721445"
    event = ShoppingEvent(evtpath, stype='data')
    trajectory_img = event.draw_tracks()
    cv2.imwrite("a.png", trajectory_img)
||||
if __name__ == "__main__":
    # Script entry point.
    main()
    # main1()
56
detecttracking/contrast/utils/tools.py
Normal file
56
detecttracking/contrast/utils/tools.py
Normal file
@ -0,0 +1,56 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Created on Thu Oct 31 15:17:01 2024
|
||||
|
||||
@author: ym
|
||||
"""
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
|
||||
|
||||
def showHist(err, correct):
    """Plot stacked histograms (50 bins, x in [0, 1]) of the error and
    correct similarity samples; returns the pyplot module for the caller
    to show or save."""
    fig, axes = plt.subplots(2, 1)
    panels = (('err', np.array(err)), ('correct', np.array(correct)))
    for axis, (title, data) in zip(axes, panels):
        axis.hist(data, bins=50, edgecolor='black')
        axis.set_xlim([0, 1])
        axis.set_title(title)
    # plt.show()
    return plt
|
||||
def show_recall_prec(recall, prec, ths):
    """Plot recall and precision curves against the threshold axis;
    returns the pyplot module for the caller to show or save."""
    # x = np.linspace(start=-0, stop=1, num=11, endpoint=True).tolist()
    fig = plt.figure(figsize=(10, 6))
    curves = ((recall, 'red', 'recall'), (prec, 'blue', 'PrecisePos'))
    for series, colour, tag in curves:
        plt.plot(ths, series, color=colour, label=tag)
    plt.legend()
    plt.xlabel('threshold')
    # plt.ylabel('Similarity')
    plt.grid(True, linestyle='--', alpha=0.5)
    # plt.savefig('accuracy_recall_grid.png')
    # plt.show()
    # plt.close()
    return plt
||||
def compute_recall_precision(err_similarity, correct_similarity):
    """Sweep 51 thresholds over [0, 1] and compute recall/precision.

    err_similarity: similarity scores of wrong matches (negatives).
    correct_similarity: similarity scores of right matches (positives).
    A sample is "predicted positive" at threshold th when score >= th.
    Returns (recall, prec, ths) as three parallel sequences of length 51.
    """
    ths = np.linspace(0, 1, 51)
    n_pos = len(correct_similarity)
    recall, prec = [], []
    for th in ths:
        TP = sum(1 for s in correct_similarity if s >= th)
        FP = sum(1 for s in err_similarity if s >= th)
        if TP + FP == 0:
            # No predictions at this threshold: precision defined as 1,
            # recall as 0 (conventional limit values).
            prec.append(1)
            recall.append(0)
        else:
            prec.append(TP / (TP + FP))
            # BUG FIX: recall is TP over the number of positives; the
            # original divided by len(err) + len(correct) (all samples),
            # which is not recall.
            recall.append(TP / n_pos if n_pos else 0)
    return recall, prec, ths
|
182
detecttracking/contrast/utils/write_feature_json.py
Normal file
182
detecttracking/contrast/utils/write_feature_json.py
Normal file
@ -0,0 +1,182 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
|
||||
@author: LiChen
|
||||
"""
|
||||
import json
|
||||
import os
|
||||
import pickle
|
||||
import numpy as np
|
||||
|
||||
import sys
|
||||
sys.path.append(r"D:\DetectTracking\contrast")
|
||||
|
||||
from config import config as conf
|
||||
# from img_data import library_imgs, temp_imgs, main_library_imgs, main_imgs_2
|
||||
# from test_logic import initModel,getFeatureList
|
||||
from model import resnet18
|
||||
import torch
|
||||
from PIL import Image
|
||||
|
||||
device = conf.device
|
||||
|
||||
def initModel():
    """Build the ResNet-18 feature extractor, load the checkpoint from
    conf.test_model onto conf.device, and return it in eval mode."""
    model = resnet18().to(device)
    model.load_state_dict(torch.load(conf.test_model, map_location=conf.device))
    model.eval()
    return model
||||
from PIL import Image
|
||||
|
||||
|
||||
def convert_rgba_to_rgb(image_path, output_path=None):
    """
    Convert a 4-channel (RGBA) PNG to 3-channel RGB.

    Non-RGBA images are left untouched on disk.  Dropping the alpha channel
    via .convert('RGB') discards transparency.

    :param image_path: path of the input image
    :param output_path: where to save the converted image; defaults to
                        overwriting image_path (the previous default and
                        only behavior)
    """
    img = Image.open(image_path)
    if img.mode == 'RGBA':
        img_rgb = img.convert('RGB')
        # BUG FIX: output_path was accepted but ignored — the source file
        # was always overwritten regardless of the argument.
        dest = output_path if output_path is not None else image_path
        img_rgb.save(dest)
        print(f"Image converted from RGBA to RGB and saved to {dest}")
    # else:
    #     # already RGB (or another mode): nothing to do
    #     pass
||||
def test_preprocess(images: list, actionModel=False) -> torch.Tensor:
    """Apply conf.test_transform to each image and stack into one batch.

    :param images: already-loaded images when actionModel is True,
                   otherwise file paths to open with PIL
    :param actionModel: selects between the two input forms above
    :return: stacked tensor of shape (n_ok, C, H, W)
    :raises RuntimeError: from torch.stack when no image survives
    """
    res = []
    for img in images:
        try:
            print(img)
            im = conf.test_transform(img) if actionModel else conf.test_transform(Image.open(img))
            res.append(im)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort skip but only for errors.
        except Exception:
            continue
    data = torch.stack(res)
    return data
|
||||
def inference(images, model, actionModel=False):
    """Preprocess *images* and run them through *model* in one batch.

    NOTE(review): the batch is moved to conf.device only when CUDA is
    available — presumably conf.device is a CUDA device in that case;
    confirm against the config module.
    """
    data = test_preprocess(images, actionModel)
    if torch.cuda.is_available():
        data = data.to(conf.device)
    features = model(data)
    return features
|
||||
def group_image(images, batch=64) -> list:
    """Split *images* into consecutive chunks of at most *batch* items.

    The final chunk may be shorter; an empty input yields an empty list.
    """
    return [images[start:start + batch] for start in range(0, len(images), batch)]
|
||||
def getFeatureList(barList, imgList, model):
    """Run *model* over every image group and collect per-barcode features.

    :param barList: barcode ids; only its length is used (one output slot each)
    :param imgList: per-barcode lists of image paths, parallel to barList
    :param model: feature extractor passed through to inference()
    :return: list (one entry per barcode) of 1-D numpy feature vectors
    """
    featList = [[] for _ in range(len(barList))]
    for index, imgs in enumerate(imgList):
        for group in group_image(imgs):
            feat_tensor = inference(group, model)
            for fe in feat_tensor:
                # .cpu() is a no-op for tensors already on the CPU, so the
                # previous `if fe.device == 'cpu'` branch was redundant.
                featList[index].append(fe.squeeze().detach().cpu().numpy())
    return featList
|
||||
def get_files(folder):
    """Walk *folder* and map each leaf directory name to its image files.

    Only directories with no subdirectories and at least one file are kept
    (leaf = one barcode's image folder).  Every file is passed through
    convert_rgba_to_rgb; files that fail conversion are dropped.
    Returns {leaf_dir_name: [file paths]} and prints the total kept count.
    """
    file_dict = {}
    cnt = 0
    # barcode_list = ['6944649700065', '6924743915848', '6920459905012', '6901285991219', '6924882406269']
    for root, dirs, files in os.walk(folder):

        folder_name = os.path.basename(root)  # current directory name
        print(folder_name)
        # with open('main_barcode.txt','a') as f:
        #     f.write(folder_name + '\n')

        # if len(dirs) == 0 and len(files) > 0 and folder_name in barcode_list:
        if len(dirs) == 0 and len(files) > 0:  # leaf directory with files
            kept = []
            for file_name in (os.path.join(root, f) for f in files):
                try:
                    convert_rgba_to_rgb(file_name)
                except Exception:
                    # BUG FIX: the original called file_names.remove() while
                    # iterating file_names, which silently skips the element
                    # after each failure; collect survivors instead.
                    continue
                kept.append(file_name)
            cnt += len(kept)
            file_dict[folder_name] = kept
    print(cnt)

    return file_dict
|
||||
def normalize(queFeatList):
    """L2-normalise every feature vector in the nested list, in place,
    and return the same (mutated) list."""
    for outer, feats in enumerate(queFeatList):
        for inner, vec in enumerate(feats):
            queFeatList[outer][inner] = vec / np.linalg.norm(vec)
    return queFeatList
|
||||
def img2feature(imgs_dict, model, barcode_flag):
    """Extract L2-normalised features for every barcode's image list.

    :param imgs_dict: {barcode: [image paths]}
    :param model: feature extractor passed through to getFeatureList
    :param barcode_flag: accepted but never used in this function —
        NOTE(review): either wire it up or drop it from the signature
    :return: (barcode ids, per-barcode lists of normalised feature vectors)
    :raises ValueError: when imgs_dict is empty
    """
    if not len(imgs_dict) > 0:
        raise ValueError("No imgs files provided")
    queBarIdList = list(imgs_dict.keys())
    queImgsList = list(imgs_dict.values())
    queFeatList = getFeatureList(queBarIdList, queImgsList, model)
    queFeatList = normalize(queFeatList)
    return queBarIdList, queFeatList
|
||||
def createFeatureDict(imgs_dict, model,
                      barcode_flag=False):  ##imgs->{barcode1:[img1_1...img1_n], barcode2:[img2_1...img2_n]}
    """Extract features for every barcode and append them to data_0909.json.

    Output layout: {"total": [{"key": barcode, "value": [feature lists]}]}.
    """
    dicts_all = {}
    value_list = []
    # BUG FIX: forward the caller's barcode_flag — it was hard-coded to
    # False, silently ignoring the parameter.
    barcode_list, imgs_list = img2feature(imgs_dict, model, barcode_flag=barcode_flag)
    for i in range(len(barcode_list)):
        dicts = {}

        # Feature vectors become plain lists so json can serialise them.
        imgs_list_ = [imgs_list[i][j].tolist() for j in range(len(imgs_list[i]))]

        dicts['key'] = barcode_list[i]
        dicts['value'] = imgs_list_
        value_list.append(dicts)
    dicts_all['total'] = value_list
    print('dicts_all', dicts_all)
    # NOTE(review): append mode produces concatenated JSON documents on
    # repeated runs (invalid as a single file); 'w' is probably intended —
    # confirm with the consumer before changing.
    with open('data_0909.json', 'a') as json_file:
        json.dump(dicts_all, json_file)
|
||||
def read_pkl_file(file_path):
    """Load and return the pickled object stored at *file_path*."""
    with open(file_path, 'rb') as fh:
        return pickle.load(fh)
|
||||
if __name__ == "__main__":
    ### Dump {image-folder-name: feature vectors} for the base image set
    ### to a JSON file.
    img_path = 'data/2000_train/base'
    imgs_dict = get_files(img_path)
    # print('imgs_dict', imgs_dict)
    model = initModel()
    createFeatureDict(imgs_dict, model, barcode_flag=False)
    ### ====================================================
    # ## ========= pkl -> json ================
    # contents = read_pkl_file('dicts_list_1887.pkl')
    # print(contents)
    # with open('data_1887.json', 'w') as json_file:
    #     json.dump(contents, json_file)
Reference in New Issue
Block a user