detecttracking/contrast/utils/event.py
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 17:35:05 2024
@author: ym
"""
import os
import numpy as np
from pathlib import Path
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import extract_data, read_tracking_output, read_similar
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
VID_FORMAT = ['.mp4', '.avi']


class Event:
    def __init__(self, eventpath, stype="data"):
        '''stype: str, one of 'video', 'image', 'data' '''
        self.eventpath = eventpath
        self.evtname = str(Path(eventpath).stem)
        self.barcode = ''
        self.evtType = ''

        '''=========== path of image and video =========== '''
        self.back_videopath = ''
        self.front_videopath = ''
        self.back_imgpaths = []
        self.front_imgpaths = []

        '''=========== process.data ==============================='''
        self.one2one = None
        self.one2n = None

        '''=========== 0/1_track.data ============================='''
        self.back_yolobboxes = np.empty((0, 6), dtype=np.float64)
        self.back_yolofeats = np.empty((0, 256), dtype=np.float64)
        self.back_trackerboxes = np.empty((0, 9), dtype=np.float64)
        self.back_trackerfeats = np.empty((0, 256), dtype=np.float64)
        self.back_trackingboxes = np.empty((0, 9), dtype=np.float64)
        self.back_trackingfeats = np.empty((0, 256), dtype=np.float64)

        self.front_yolobboxes = np.empty((0, 6), dtype=np.float64)
        self.front_yolofeats = np.empty((0, 256), dtype=np.float64)
        self.front_trackerboxes = np.empty((0, 9), dtype=np.float64)
        self.front_trackerfeats = np.empty((0, 256), dtype=np.float64)
        self.front_trackingboxes = np.empty((0, 9), dtype=np.float64)
        self.front_trackingfeats = np.empty((0, 256), dtype=np.float64)

        '''=========== 0/1_tracking_output.data ==================='''
        self.back_boxes = np.empty((0, 9), dtype=np.float64)
        self.front_boxes = np.empty((0, 9), dtype=np.float64)
        self.back_feats = np.empty((0, 256), dtype=np.float64)
        self.front_feats = np.empty((0, 256), dtype=np.float64)
        self.feats_compose = np.empty((0, 256), dtype=np.float64)
        self.feats_select = np.empty((0, 256), dtype=np.float64)

        if stype == "data":
            self.from_datafile(eventpath)
        if stype == "video":
            self.from_video(eventpath)
        if stype == "image":
            self.from_image(eventpath)
    def from_datafile(self, eventpath):
        evtList = self.evtname.split('_')
        if len(evtList) >= 2 and len(evtList[-1]) >= 10 and evtList[-1].isdigit():
            self.barcode = evtList[-1]
        if len(evtList) == 3 and evtList[-1] == evtList[-2]:
            self.evtType = 'input'
        else:
            self.evtType = 'other'

        '''================ path of image ============='''
        frontImgs, frontFid = [], []
        backImgs, backFid = [], []
        for imgname in os.listdir(eventpath):
            name, ext = os.path.splitext(imgname)
            if ext not in IMG_FORMAT or name.find('frameId') < 0: continue
            # expect image names like "<camera>_..._frameId_<n>"; skip anything that does not match
            if len(name.split('_')) < 4 or not name.split('_')[3].isdigit(): continue
            CamerType = name.split('_')[0]
            frameId = int(name.split('_')[3])
            imgpath = os.path.join(eventpath, imgname)
            if CamerType == '0':
                backImgs.append(imgpath)
                backFid.append(frameId)
            if CamerType == '1':
                frontImgs.append(imgpath)
                frontFid.append(frameId)

        ## build the front/back camera image path lists sorted by frame ID
        frontIdx = np.argsort(np.array(frontFid))
        backIdx = np.argsort(np.array(backFid))
        self.front_imgpaths = [frontImgs[i] for i in frontIdx]
        self.back_imgpaths = [backImgs[i] for i in backIdx]
        '''================ path of video ============='''
        for vidname in os.listdir(eventpath):
            name, ext = os.path.splitext(vidname)
            if ext not in VID_FORMAT: continue
            vidpath = os.path.join(eventpath, vidname)
            CamerType = name.split('_')[0]
            if CamerType == '0':
                self.back_videopath = vidpath
            if CamerType == '1':
                self.front_videopath = vidpath

        '''================ process.data ============='''
        procpath = Path(eventpath).joinpath('process.data')
        if procpath.is_file():
            SimiDict = read_similar(procpath)
            self.one2one = SimiDict['one2one']
            self.one2n = SimiDict['one2n']
        '''=========== 0/1_track.data & 0/1_tracking_output.data ======='''
        for dataname in os.listdir(eventpath):
            datapath = os.path.join(eventpath, dataname)
            if not os.path.isfile(datapath): continue
            CamerType = dataname.split('_')[0]

            '''========== 0/1_track.data =========='''
            if dataname.find("_track.data") > 0:
                bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(datapath)
                if CamerType == '0':
                    self.back_yolobboxes = bboxes
                    self.back_yolofeats = ffeats
                    self.back_trackerboxes = trackerboxes
                    self.back_trackerfeats = tracker_feat_dict
                    self.back_trackingboxes = trackingboxes
                    self.back_trackingfeats = tracking_feat_dict
                if CamerType == '1':
                    self.front_yolobboxes = bboxes
                    self.front_yolofeats = ffeats
                    self.front_trackerboxes = trackerboxes
                    self.front_trackerfeats = tracker_feat_dict
                    self.front_trackingboxes = trackingboxes
                    self.front_trackingfeats = tracking_feat_dict

            '''========== 0/1_tracking_output.data =========='''
            if dataname.find("_tracking_output.data") > 0:
                tracking_output_boxes, tracking_output_feats = read_tracking_output(datapath)
                if CamerType == '0':
                    self.back_boxes = tracking_output_boxes
                    self.back_feats = tracking_output_feats
                elif CamerType == '1':
                    self.front_boxes = tracking_output_boxes
                    self.front_feats = tracking_output_feats

        self.select_feats()
        self.compose_feats()
    def compose_feats(self):
        '''Compose the event features by stacking front- and back-camera features.'''
        feats_compose = np.empty((0, 256), dtype=np.float64)
        if len(self.front_feats):
            feats_compose = np.concatenate((feats_compose, self.front_feats), axis=0)
        if len(self.back_feats):
            feats_compose = np.concatenate((feats_compose, self.back_feats), axis=0)
        self.feats_compose = feats_compose
    def select_feats(self):
        '''Select the event features: prefer the front camera, fall back to the back camera.'''
        if len(self.front_feats):
            self.feats_select = self.front_feats
        else:
            self.feats_select = self.back_feats
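

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): builds an Event from
# a hypothetical event directory and prints a few of the loaded fields. The
# path and the directory layout are assumptions; adjust them to your data.
if __name__ == "__main__":
    evtpath = r"D:\DetectTracking\events\evt_6901234567890"  # hypothetical event directory
    evt = Event(evtpath, stype="data")
    print("barcode:", evt.barcode)
    print("front images:", len(evt.front_imgpaths), "back images:", len(evt.back_imgpaths))
    print("composed feats:", evt.feats_compose.shape, "selected feats:", evt.feats_select.shape)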