detecttracking/tracking/utils/read_data.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 13:59:21 2024

func: extract_data()
    Reads the data produced by the individual pipeline modules; the interface
    is reworked on the basis of 马晓慧's read_pipeline_data.py.

@author: ym
"""
import os
import numpy as np
from collections import OrderedDict
import matplotlib.pyplot as plt


def str_to_float_arr(s):
    # Remove a trailing comma, if present, then split on ',' and convert
    # every element to float.
    if s.endswith(','):
        s = s[:-1]
    float_array = [float(x) for x in s.split(",")]
    return float_array
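# Hedged, doctest-style sketch of the expected input (a comma-separated
# payload such as the text after "box:" or "feat:"); the numbers are made up:
#
#   str_to_float_arr("1.0, 2.5, 3.0,")   # -> [1.0, 2.5, 3.0]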
def find_samebox_in_array(arr, target):
    # Return the index of the first box in `arr` whose first four values
    # (the box coordinates) match `target`, or -1 if none matches.
    for i, st in enumerate(arr):
        if st[:4] == target[:4]:
            return i
    return -1
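# Minimal usage sketch: two detections count as the same box when their first
# four coordinates agree exactly. The values below are illustrative only.
#
#   dets = [[10.0, 20.0, 50.0, 80.0, 0.9, 0.0],
#           [12.0, 22.0, 55.0, 90.0, 0.8, 1.0]]
#   find_samebox_in_array(dets, [12.0, 22.0, 55.0, 90.0, 0.7, 3.0])  # -> 1
#   find_samebox_in_array(dets, [0.0, 0.0, 1.0, 1.0])                # -> -1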
def extract_data(datapath):
    bboxes, ffeats = [], []
    trackerboxes = np.empty((0, 9), dtype=np.float64)
    trackerfeats = np.empty((0, 256), dtype=np.float64)

    boxes, feats, tboxes, tfeats = [], [], [], []
    with open(datapath, 'r', encoding='utf-8') as lines:
        for line in lines:
            line = line.strip()  # strip the trailing newline and any whitespace
            if not line:         # skip blank lines
                continue

            # A "CameraId" line starts a new block: flush everything collected
            # for the previous block.
            if line.find("CameraId") >= 0:
                if len(boxes): bboxes.append(np.array(boxes))
                if len(feats): ffeats.append(np.array(feats))
                if len(tboxes):
                    trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
                if len(tfeats):
                    trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))

                boxes, feats, tboxes, tfeats = [], [], [], []

            if line.find("box:") >= 0 and line.find("output_box:") < 0:
                box = line[line.find("box:") + 4:].strip()
                boxes.append(str_to_float_arr(box))

            if line.find("feat:") >= 0:
                feat = line[line.find("feat:") + 5:].strip()
                feats.append(str_to_float_arr(feat))

            if line.find("output_box:") >= 0:
                # Strip the 'output_box:' prefix and surrounding whitespace.
                box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
                tboxes.append(box)

                index = find_samebox_in_array(boxes, box)
                assert len(boxes) == len(feats), f"{datapath}, len(boxes)!=len(feats)"
                if index >= 0:
                    # L2-normalize the matching detection feature.
                    feat_f = feats[index]
                    norm_f = np.linalg.norm(feat_f)
                    feat_f = feat_f / norm_f
                    tfeats.append(feat_f)

    # Flush the last block.
    if len(boxes): bboxes.append(np.array(boxes))
    if len(feats): ffeats.append(np.array(feats))
    if len(tboxes): trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
    if len(tfeats): trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))

    assert len(bboxes) == len(ffeats), "Error at Yolo output!"
    assert len(trackerboxes) == len(trackerfeats), "Error at tracker output!"

    # Group the tracker features by frame id and box id:
    # tracker_feat_dict["frame_<fid>"]["feats"][<bid>] -> 256-d feature
    tracker_feat_dict = {}
    for i in range(len(trackerboxes)):
        tid, fid, bid = int(trackerboxes[i, 4]), int(trackerboxes[i, 7]), int(trackerboxes[i, 8])
        if f"frame_{fid}" not in tracker_feat_dict:
            tracker_feat_dict[f"frame_{fid}"] = {"feats": {}}
        tracker_feat_dict[f"frame_{fid}"]["feats"].update({bid: trackerfeats[i, :]})

    # Second pass: collect the per-track box lists that follow each
    # "tracking_*" header line.
    boxes, trackingboxes = [], []
    tracking_flag = False
    with open(datapath, 'r', encoding='utf-8') as lines:
        for line in lines:
            line = line.strip()  # strip the trailing newline and any whitespace
            if not line:         # skip blank lines
                continue
            if tracking_flag:
                if line.find("tracking_") >= 0:
                    tracking_flag = False
                else:
                    box = str_to_float_arr(line)
                    boxes.append(box)
            if line.find("tracking_") >= 0:
                tracking_flag = True
                if len(boxes):
                    trackingboxes.append(np.array(boxes))
                    boxes = []
    if len(boxes):
        trackingboxes.append(np.array(boxes))

    # Group the features by track id:
    # tracking_feat_dict["track_<tid>"]["feats"]["<fid>_<bid>"] -> 256-d feature
    tracking_feat_dict = {}
    try:
        for boxes in trackingboxes:
            for box in boxes:
                tid, fid, bid = int(box[4]), int(box[7]), int(box[8])
                if f"track_{tid}" not in tracking_feat_dict:
                    tracking_feat_dict[f"track_{tid}"] = {"feats": {}}
                tracking_feat_dict[f"track_{tid}"]["feats"].update(
                    {f"{fid}_{bid}": tracker_feat_dict[f"frame_{fid}"]["feats"][bid]})
    except Exception as e:
        print(f'Path: {datapath}, tracking_feat_dict can not be structured correctly, Error: {e}')

    return bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict
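# Minimal usage sketch for extract_data(), assuming a per-event "track.data"
# log as checked for in the __main__ block below; the path is hypothetical.
#
#   bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = \
#       extract_data(r"D:\logs\0_track.data")
#   # bboxes / ffeats: one array per "CameraId" block (detections and 256-d features)
#   # trackerboxes:    (N, 9) array of tracker outputs; columns 4/7/8 hold tid/fid/bid
#   # tracker_feat_dict["frame_<fid>"]["feats"][<bid>]          -> L2-normalized feature
#   # tracking_feat_dict["track_<tid>"]["feats"]["<fid>_<bid>"] -> the same feature, grouped per track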
def read_tracking_output(filepath):
    # Parse a tracking_output file: 9-element lines are boxes, 256-element
    # lines are the corresponding features.
    boxes = []
    feats = []
    with open(filepath, 'r', encoding='utf-8') as file:
        for line in file:
            line = line.strip()  # strip the trailing newline and any whitespace
            if not line:
                continue
            if line.endswith(','):
                line = line[:-1]
            data = np.array([float(x) for x in line.split(",")])
            if data.size == 9:
                boxes.append(data)
            if data.size == 256:
                feats.append(data)
    assert len(feats) == len(boxes), f"{filepath}, len(feats)!=len(boxes)"
    return np.array(boxes), np.array(feats)
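# Hedged usage sketch (the file name is hypothetical):
#
#   boxes, feats = read_tracking_output(r"D:\logs\0_tracking_output.data")
#   # boxes.shape == (N, 9), feats.shape == (N, 256), expected to pair up row by row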
def read_deletedBarcode_file(filePth):
    # Parse the deletedBarcode log: records are separated by blank lines and
    # each record carries 'SeqDir', 'Deleted' and a 'List' of
    # "barcode: similarity" pairs.
    with open(filePth, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    split_flag, all_list = False, []
    record, barcode_list, similarity_list = {}, [], []

    clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
    for line in clean_lines:
        stripped_line = line.strip()
        if not stripped_line:
            # A blank line closes the current record.
            if len(barcode_list): record['barcode'] = barcode_list
            if len(similarity_list): record['similarity'] = similarity_list
            if len(record): all_list.append(record)

            split_flag = False
            record, barcode_list, similarity_list = {}, [], []
            continue

        if line.find(':') < 0: continue
        label = line.split(':')[0]
        value = line.split(':')[1]

        if label == 'SeqDir':
            record['SeqDir'] = value
        if label == 'Deleted':
            record['Deleted'] = value
        if label == 'List':
            split_flag = True
            continue
        if split_flag:
            barcode_list.append(label)
            similarity_list.append(value)

    # Flush the last record if the file does not end with a blank line.
    if len(barcode_list): record['barcode'] = barcode_list
    if len(similarity_list): record['similarity'] = similarity_list
    if len(record): all_list.append(record)

    return all_list
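# Sketch of a record shaped the way this parser expects and of the parsed
# result; all field values below are made up for illustration:
#
#   SeqDir: <seq-dir-name>
#   Deleted: 6901234567890
#   List:
#   6901234567890: 0.82
#   6909876543210: 0.35
#
#   read_deletedBarcode_file(path)[0]
#   # -> {'SeqDir': ' <seq-dir-name>', 'Deleted': ' 6901234567890',
#   #     'barcode': ['6901234567890', '6909876543210'],
#   #     'similarity': [' 0.82', ' 0.35']}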
def read_weight_timeConsuming(filePth):
    # Parse the process.data log into three ordered dicts: weight samples,
    # IMU sensor samples and per-stage processing times.
    WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()

    with open(filePth, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    label = ''
    for line in lines:
        line = line.strip()
        if line.find(':') < 0: continue

        # Section headers switch the target dict for the lines that follow.
        if line.find("Weight") >= 0:
            label = "Weight"
            continue
        if line.find("Sensor") >= 0:
            label = "Sensor"
            continue
        if line.find("processTime") >= 0:
            label = "ProcessTime"
            continue

        keyword = line.split(':')[0]
        value = line.split(':')[1]
        if label == "Weight":
            WeightDict[keyword] = float(value.strip(','))
        if label == "Sensor":
            SensorDict[keyword] = [float(s) for s in value.split(',') if len(s)]
        if label == "ProcessTime":
            ProcessTimeDict[keyword] = float(value.strip(','))

    return WeightDict, SensorDict, ProcessTimeDict
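# Hedged usage sketch (the path is hypothetical; key meanings are inferred
# from how plot_sensor_curve consumes these dicts):
#
#   WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(r"D:\logs\process.data")
#   # WeightDict:      {"<timestamp_ms>": weight_value, ...}
#   # SensorDict:      {"<timestamp_ms>": [9 IMU readings], ...}
#   # ProcessTimeDict: {"<stage>": elapsed_time, ...}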
def plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict):
    # Plot the weight curve and the 9 IMU channels against time (ms).
    wtime, wdata = [], []
    stime, sdata = [], []
    for key, value in WeightDict.items():
        wtime.append(int(key))
        wdata.append(value)
    for key, value in SensorDict.items():
        if len(value) != 9: continue
        stime.append(int(key))
        sdata.append(np.array(value))

    # Sliding window (width 8) over the weight samples; the peak-to-peak range
    # per window is computed but not used further in this plot.
    width = 8
    nw = len(wdata)
    assert nw >= width, "The num of weight data is less than 8!"
    win_ranges = []
    i2 = width - 1
    while i2 < nw:
        i1 = max(0, i2 - width + 1)
        data = wdata[i1:(i2 + 1)]
        win_ranges.append(max(data) - min(data))
        i2 += 1

    # Shift both time axes so that they start at zero.
    min_t = min(wtime + stime)
    wtime = [t - min_t for t in wtime]
    stime = [t - min_t for t in stime]
    max_t = max(wtime + stime)

    fig = plt.figure(figsize=(16, 12))
    gs = fig.add_gridspec(2, 1, left=0.1, right=0.9, bottom=0.1, top=0.9,
                          wspace=0.05, hspace=0.15)
    ax1 = fig.add_subplot(gs[0, 0])
    ax2 = fig.add_subplot(gs[1, 0])

    ax1.plot(wtime, wdata, 'b--', linewidth=2)
    for i in range(9):
        ydata = [s[i] for s in sdata]
        ax2.plot(stime, ydata, linewidth=2)

    ax1.grid(True)
    ax1.set_xlim(0, max_t)
    ax1.set_title('Weight')
    ax1.set_xlabel("(Time: ms)")

    ax2.grid(True)
    ax2.set_xlim(0, max_t)
    ax2.set_title('IMU')

    plt.show()
def main(file_path):
    WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(file_path)
    plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict)


if __name__ == "__main__":
    files_path = r'\\192.168.1.28\share\测试_202406\0814\0814\20240814-102227-62264578-a720-4eb9-b95e-cb8be009aa98_null'

    k = 0
    for filename in os.listdir(files_path):
        # Debug override: force the loop to look at process.data only.
        filename = 'process.data'
        file_path = os.path.join(files_path, filename)

        if os.path.isfile(file_path) and filename.find("track.data") > 0:
            extract_data(file_path)
        if os.path.isfile(file_path) and filename.find("process.data") >= 0:
            main(file_path)

        k += 1
        if k == 1:
            break