王庆刚
2024-11-04 18:06:52 +08:00
parent dfb2272a15
commit 5ecc1285d4
41 changed files with 2552 additions and 440 deletions

View File

@@ -7,11 +7,13 @@ Created on Mon Jan 15 15:26:38 2024
import numpy as np
import cv2
import os
+from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
-from utils.annotator import TrackAnnotator
-from utils.plotting import colors
-from pathlib import Path
+from tracking.utils.annotator import TrackAnnotator
+from tracking.utils.plotting import colors
def plot_frameID_y2(vts):

View File

@@ -10,10 +10,10 @@ import numpy as np
import re
import os
from collections import OrderedDict
import warnings
import matplotlib.pyplot as plt
def str_to_float_arr(s):
# Remove a trailing comma from the string, if present
if s.endswith(','):
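The hunk cuts str_to_float_arr off at this point. A minimal sketch of a plausible completion, assuming the helper simply parses a comma-separated string into floats after dropping the trailing comma (the body below is inferred, not taken from this diff):

def str_to_float_arr(s):
    # Drop a trailing comma, if present, then parse the remaining
    # comma-separated fields as floats.
    if s.endswith(','):
        s = s[:-1]
    return [float(x) for x in s.split(',') if x.strip()]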
@@ -31,7 +31,9 @@ def find_samebox_in_array(arr, target):
return i
return -1
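Only the tail of find_samebox_in_array is visible above. A rough sketch of what such a helper might look like, assuming it returns the index of the first row in arr whose leading box coordinates equal those of target, or -1 when nothing matches (the column count and comparison are assumptions):

import numpy as np

def find_samebox_in_array(arr, target, ncols=4):
    # Assumed behaviour: compare the first `ncols` values of each row
    # (e.g. x1, y1, x2, y2) against `target` and return the first match.
    for i, row in enumerate(arr):
        if np.array_equal(np.asarray(row)[:ncols], np.asarray(target)[:ncols]):
            return i
    return -1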
import warnings
def extract_data(datapath):
@@ -41,30 +43,26 @@ def extract_data(datapath):
trackerfeats = np.empty((0, 256), dtype=np.float64)
boxes, feats, tboxes, tfeats = [], [], [], []
timestamps, frameIds = [], []
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
line = line.strip() # strip the trailing newline and any surrounding whitespace
if not line: # skip empty lines
continue
if line.find("CameraId")>=0:
if len(boxes): bboxes.append(np.array(boxes))
if len(feats): ffeats.append(np.array(feats))
# with warnings.catch_warnings(record=True) as w:
# if len(boxes): bboxes.append(np.array(boxes))
# if len(feats): ffeats.append(np.array(feats))
# if w:
# print(f"Caught {len(w)} warnings:")
# for warning in w:
# print(f"Warning category: {warning.category}")
# print(f"Warning message: {warning.message}")
# print(f"Warning raised at: {warning.filename}:{warning.lineno}")
if len(tboxes):
trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
if len(tfeats):
trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
timestamp, frameId = [int(ln.split(":")[1]) for ln in line.split(",")[1:]]
timestamps.append(timestamp)
frameIds.append(frameId)
boxes, feats, tboxes, tfeats = [], [], [], []
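The branch above implies each per-frame header line looks roughly like CameraId:<id>,TimeStamp:<ms>,FrameId:<n>: the CameraId field is matched by name and the remaining comma-separated key:value pairs are parsed as integers. A small illustration of that parsing step in isolation (the sample line is an assumed format, not taken from this diff):

# Hypothetical header line, shaped after the parsing code above.
line = "CameraId:1,TimeStamp:1730707612000,FrameId:42"

if line.find("CameraId") >= 0:
    # Skip the CameraId field, then read the remaining key:value pairs as ints.
    timestamp, frameId = [int(ln.split(":")[1]) for ln in line.split(",")[1:]]
    print(timestamp, frameId)  # -> 1730707612000 42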
@@ -103,6 +101,9 @@ def extract_data(datapath):
assert(len(trackerboxes)==len(trackerfeats)), "Error at tracker output!"
tracker_feat_dict = {}
tracker_feat_dict["timestamps"] = timestamps
tracker_feat_dict["frameIds"] = frameIds
for i in range(len(trackerboxes)):
tid, fid, bid = int(trackerboxes[i, 4]), int(trackerboxes[i, 7]), int(trackerboxes[i, 8])
if f"frame_{fid}" not in tracker_feat_dict:
@@ -169,8 +170,8 @@ def read_tracking_output(filepath):
return np.array(boxes), np.array(feats)
-def read_deletedBarcode_file(filePth):
-with open(filePth, 'r', encoding='utf-8') as f:
+def read_deletedBarcode_file(filePath):
+with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
split_flag, all_list = False, []
@@ -179,6 +180,9 @@ def read_deletedBarcode_file(filePth):
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for i, line in enumerate(clean_lines):
if line.endswith(','):
line = line[:-1]
stripped_line = line.strip()
if not stripped_line:
if len(barcode_list): dict['barcode'] = barcode_list
@@ -210,11 +214,106 @@ def read_deletedBarcode_file(filePth):
return all_list
def read_returnGoods_file(filePath):
'''
Starting 2024-10-30, the data format that used to live in deletedBarcode.txt was changed to returnGoods.txt, and the reading logic changes accordingly
'''
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
all_list = []
split_flag, dict = False, {}
barcode_list, similarity_list = [], []
event_list, type_list = [], []
for i, line in enumerate(clean_lines):
stripped_line = line.strip()
if line.endswith(','):
line = line[:-1]
if not stripped_line:
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(event_list): dict['event'] = event_list
if len(type_list): dict['type'] = type_list
if len(dict) and dict['SeqDir'].find('*')<0:
all_list.append(dict)
split_flag, dict = False, {}
barcode_list, similarity_list = [], []
event_list, type_list = [], []
continue
if line.find(':')<0: continue
if line.find('1:n')==0: continue
label = line.split(':')[0].strip()
value = line.split(':')[1].strip()
if label == 'SeqDir':
dict['SeqDir'] = value
dict['Deleted'] = value.split('_')[-1]
if label == 'List':
split_flag = True
continue
if split_flag:
event_list.append(label)
barcode_list.append(label.split('_')[-1])
similarity_list.append(value.split(',')[0])
type_list.append(value.split('=')[-1])
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(event_list): dict['event'] = event_list
if len(type_list): dict['type'] = type_list
if len(dict) and dict['SeqDir'].find('*')<0:
all_list.append(dict)
return all_list
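A small illustration of the file layout read_returnGoods_file appears to expect: a SeqDir line, a List: marker, then one <event>_<barcode>: <similarity>,...=<type> line per returned item, with a blank line closing each record. Every value below is invented for illustration, and read_returnGoods_file is assumed to be importable from this module:

# Hypothetical returnGoods.txt fragment, shaped after the parser above.
sample = (
    "SeqDir: 20241030-153012_6923450657713\n"
    "List:\n"
    "20241030-153145_6923450657713: 0.87,match=return\n"
    "20241030-153201_6901234567890: 0.42,match=other\n"
    "\n"
)

with open("returnGoods_demo.txt", "w", encoding="utf-8") as f:
    f.write(sample)

records = read_returnGoods_file("returnGoods_demo.txt")
# Each record is a dict with keys such as 'SeqDir', 'Deleted', 'event',
# 'barcode', 'similarity' and 'type'.
print(records)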
def read_seneor(filepath):
WeightDict = OrderedDict()
with open(filepath, 'r', encoding='utf-8') as f:
lines = f.readlines()
for i, line in enumerate(lines):
line = line.strip()
keyword = line.split(':')[0]
value = line.split(':')[1]
vdata = [float(s) for s in value.split(',') if len(s)]
WeightDict[keyword] = vdata[-1]
return WeightDict
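read_seneor expects one "key: v1,v2,..." line per record and keeps only the last value for each key. A short usage sketch with invented readings, assuming the function above is in scope:

# Hypothetical weight/sensor file, shaped after read_seneor above.
sample = (
    "1730707612000: 0.0,12.5,12.6\n"
    "1730707612100: 12.6,12.7\n"
)

with open("sensor_demo.txt", "w", encoding="utf-8") as f:
    f.write(sample)

weights = read_seneor("sensor_demo.txt")
# OrderedDict keeping only the last reading per line:
# {'1730707612000': 12.6, '1730707612100': 12.7}
print(weights)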
def read_weight_timeConsuming(filePth):
WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()
with open(filePth, 'r', encoding='utf-8') as f:
lines = f.readlines()
# label = ''
for i, line in enumerate(lines):
line = line.strip()