# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 13:59:21 2024
func: extract_data()
    Read the data produced by each Pipeline module; interfaces reworked on the basis of 马晓慧's read_pipeline_data.py
@author: ym
"""
import numpy as np
import re
import os
from collections import OrderedDict
import warnings
import matplotlib.pyplot as plt
def str_to_float_arr(s):
    # Remove a trailing comma from the string, if present
if s.endswith(','):
s = s[:-1]
    # Split the string on commas and convert each element to float
float_array = [float(x) for x in s.split(",")]
return float_array
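# Example: str_to_float_arr("0.1,0.2,0.3,") -> [0.1, 0.2, 0.3]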
def find_samebox_in_array(arr, target):
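    # Two boxes are treated as the same detection when their first four values
    # (the bounding-box coordinates) match exactly.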
for i, st in enumerate(arr):
if st[:4] == target[:4]:
return i
return -1
def extract_data(datapath):
'''
    Read data from a 0/1_track.data file
'''
bboxes, ffeats = [], []
trackerboxes = np.empty((0, 9), dtype=np.float64)
trackerfeats = np.empty((0, 256), dtype=np.float64)
boxes, feats, tboxes, tfeats = [], [], [], []
timestamps, frameIds = [], []
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
            line = line.strip()  # strip the trailing newline and any surrounding whitespace
            if not line:  # skip empty lines
continue
if line.find("CameraId")>=0:
if len(boxes): bboxes.append(np.array(boxes))
if len(feats): ffeats.append(np.array(feats))
if len(tboxes):
trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
if len(tfeats):
trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
timestamp, frameId = [int(ln.split(":")[1]) for ln in line.split(",")[1:]]
timestamps.append(timestamp)
frameIds.append(frameId)
boxes, feats, tboxes, tfeats = [], [], [], []
if line.find("box:") >= 0 and line.find("output_box:") < 0:
box = line[line.find("box:") + 4:].strip()
# if len(box)==6:
boxes.append(str_to_float_arr(box))
if line.find("feat:") >= 0:
feat = line[line.find("feat:") + 5:].strip()
# if len(feat)==256:
feats.append(str_to_float_arr(feat))
if line.find("output_box:") >= 0:
                assert len(boxes) == len(feats), f"{datapath}, len(boxes)!=len(feats)"
box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
index = find_samebox_in_array(boxes, box)
if index >= 0:
                    tboxes.append(box)  # the 'output_box:' prefix and surrounding whitespace were stripped above
# feat_f = str_to_float_arr(input_feats[index])
feat_f = feats[index]
norm_f = np.linalg.norm(feat_f)
feat_f = feat_f / norm_f
tfeats.append(feat_f)
if len(boxes): bboxes.append(np.array(boxes))
if len(feats): ffeats.append(np.array(feats))
if len(tboxes): trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
if len(tfeats): trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
assert(len(bboxes)==len(ffeats)), "Error at Yolo output!"
assert(len(trackerboxes)==len(trackerfeats)), "Error at tracker output!"
tracker_feat_dict = {}
tracker_feat_dict["timestamps"] = timestamps
tracker_feat_dict["frameIds"] = frameIds
for i in range(len(trackerboxes)):
tid, fid, bid = int(trackerboxes[i, 4]), int(trackerboxes[i, 7]), int(trackerboxes[i, 8])
if f"frame_{fid}" not in tracker_feat_dict:
tracker_feat_dict[f"frame_{fid}"]= {"feats": {}}
tracker_feat_dict[f"frame_{fid}"]["feats"].update({bid: trackerfeats[i, :]})
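    # Structure built above:
    #   tracker_feat_dict = {"timestamps": [...], "frameIds": [...],
    #                        "frame_<fid>": {"feats": {<bid>: 256-d L2-normalized feature}}}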
boxes, trackingboxes= [], []
tracking_flag = False
with open(datapath, 'r', encoding='utf-8') as lines:
for line in lines:
            line = line.strip()  # strip the trailing newline and any surrounding whitespace
            if not line:  # skip empty lines
continue
if tracking_flag:
if line.find("tracking_") >= 0:
tracking_flag = False
else:
box = str_to_float_arr(line)
boxes.append(box)
if line.find("tracking_") >= 0:
tracking_flag = True
if len(boxes):
trackingboxes.append(np.array(boxes))
boxes = []
if len(boxes):
trackingboxes.append(np.array(boxes))
tracking_feat_dict = {}
try:
for i, boxes in enumerate(trackingboxes):
for box in boxes:
tid, fid, bid = int(box[4]), int(box[7]), int(box[8])
if f"track_{tid}" not in tracking_feat_dict:
tracking_feat_dict[f"track_{tid}"]= {"feats": {}}
tracking_feat_dict[f"track_{tid}"]["feats"].update({f"{fid}_{bid}": tracker_feat_dict[f"frame_{fid}"]["feats"][bid]})
except Exception as e:
        print(f'Path: {datapath}, tracking_feat_dict could not be structured correctly, Error: {e}')
return bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict
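# Usage sketch (hypothetical path; the *_track.data layout is assumed to match the
# parser above):
#     bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = \
#         extract_data(r"path/to/0_track.data")
#     # bboxes/ffeats: per-frame YOLO boxes and features; trackerboxes: stacked (N, 9)
#     # tracker boxes; the two dicts index L2-normalized features by frame / track id.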
def read_tracking_output(filepath):
'''
    Read data from a 0/1_tracking_output.data file
'''
boxes = []
feats = []
if not os.path.isfile(filepath):
return np.array(boxes), np.array(feats)
with open(filepath, 'r', encoding='utf-8') as file:
for line in file:
            line = line.strip()  # strip the trailing newline and any surrounding whitespace
if not line:
continue
if line.endswith(','):
line = line[:-1]
data = np.array([float(x) for x in line.split(",")])
if data.size == 9:
boxes.append(data)
if data.size == 256:
feats.append(data)
assert(len(feats)==len(boxes)), f"{filepath}, len(feats)!=len(boxes)"
return np.array(boxes), np.array(feats)
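# Usage sketch (hypothetical path): with the alternating 9-value box / 256-value feature
# layout parsed above, boxes comes back as an (N, 9) array and feats as (N, 256).
#     boxes, feats = read_tracking_output(r"path/to/0_tracking_output.data")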
def read_deletedBarcode_file(filePath):
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
split_flag, all_list = False, []
dict, barcode_list, similarity_list = {}, [], []
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
for i, line in enumerate(clean_lines):
if line.endswith(','):
line = line[:-1]
stripped_line = line.strip()
if not stripped_line:
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(dict): all_list.append(dict)
split_flag = False
dict, barcode_list, similarity_list = {}, [], []
continue
if line.find(':')<0: continue
label = line.split(':')[0]
value = line.split(':')[1]
if label == 'SeqDir':
dict['SeqDir'] = value
dict['filetype'] = "deletedBarcode"
if label == 'Deleted':
dict['Deleted'] = value
if label == 'List':
split_flag = True
continue
if split_flag:
barcode_list.append(label)
similarity_list.append(value)
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(dict): all_list.append(dict)
return all_list
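# Each record returned by read_deletedBarcode_file() is a dict with 'SeqDir', 'Deleted'
# and 'filetype' == "deletedBarcode", plus parallel 'barcode' / 'similarity' lists parsed
# from the block that follows a "List:" line.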
def read_returnGoods_file(filePath):
'''
    Starting from 2024-10-30, the data format of the original deletedBarcode.txt was changed to returnGoods.txt; the reading logic changed accordingly
'''
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
all_list = []
split_flag, dict = False, {}
barcode_list, similarity_list = [], []
event_list, type_list = [], []
for i, line in enumerate(clean_lines):
stripped_line = line.strip()
if line.endswith(','):
line = line[:-1]
if not stripped_line:
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(event_list): dict['event'] = event_list
if len(type_list): dict['type'] = type_list
if len(dict) and dict['SeqDir'].find('*')<0:
all_list.append(dict)
split_flag, dict = False, {}
barcode_list, similarity_list = [], []
event_list, type_list = [], []
continue
if line.find(':')<0: continue
if line.find('1:n')==0: continue
label = line.split(':')[0].strip()
value = line.split(':')[1].strip()
if label == 'SeqDir':
dict['SeqDir'] = value
dict['Deleted'] = value.split('_')[-1]
dict['filetype'] = "returnGoods"
if label == 'List':
split_flag = True
continue
if split_flag:
bcd = label.split('_')[-1]
# event_list.append(label + '_' + bcd)
event_list.append(label)
barcode_list.append(bcd)
similarity_list.append(value.split(',')[0])
type_list.append(value.split('=')[-1])
if len(barcode_list): dict['barcode'] = barcode_list
if len(similarity_list): dict['similarity'] = similarity_list
if len(event_list): dict['event'] = event_list
if len(type_list): dict['type'] = type_list
if len(dict) and dict['SeqDir'].find('*')<0:
all_list.append(dict)
return all_list
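# Each record returned by read_returnGoods_file() carries 'SeqDir', 'Deleted' (the last
# '_'-separated field of SeqDir), 'filetype' == "returnGoods", and parallel 'event',
# 'barcode', 'similarity' and 'type' lists; records whose SeqDir contains '*' are skipped.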
# =============================================================================
# def read_seneor(filepath):
# WeightDict = OrderedDict()
# with open(filepath, 'r', encoding='utf-8') as f:
# lines = f.readlines()
# for i, line in enumerate(lines):
# line = line.strip()
#
# keyword = line.split(':')[0]
# value = line.split(':')[1]
#
# vdata = [float(s) for s in value.split(',') if len(s)]
#
# WeightDict[keyword] = vdata[-1]
#
# return WeightDict
# =============================================================================
def read_one2one_simi(filePath):
SimiDict = {}
with open(filePath, 'r', encoding='utf-8') as f:
lines = f.readlines()
flag = False
for i, line in enumerate(lines):
line = line.strip()
if line.find('barcode:')<0 and not flag:
continue
if line.find('barcode:')==0 :
flag = True
continue
# if line.endswith(','):
# line = line[:-1]
        if flag:
            # An empty line ends the "barcode:" block; check it before parsing so an
            # empty line does not raise an IndexError on line.split(',')[1].
            if not line:
                flag = False
                continue
            barcode = line.split(',')[0].strip()
            value = line.split(',')[1].split(':')[1].strip()
            SimiDict[barcode] = float(value)
return SimiDict
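# Assumed layout (inferred from the parser above): after a "barcode:" header line, each
# record line looks like "<barcode>, <label>: <similarity>", and read_one2one_simi()
# returns {<barcode>: float(<similarity>)}.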
def read_weight_timeConsuming(filePth):
WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()
with open(filePth, 'r', encoding='utf-8') as f:
lines = f.readlines()
    label = ''  # default label so a stray "key: value" line before any section header is ignored
for i, line in enumerate(lines):
line = line.strip()
if line.find(':') < 0: continue
if line.find("Weight") >= 0:
label = "Weight"
continue
if line.find("Sensor") >= 0:
label = "Sensor"
continue
if line.find("processTime") >= 0:
label = "ProcessTime"
continue
keyword = line.split(':')[0]
value = line.split(':')[1]
if label == "Weight":
WeightDict[keyword] = float(value.strip(','))
if label == "Sensor":
SensorDict[keyword] = [float(s) for s in value.split(',') if len(s)]
if label == "ProcessTime":
ProcessTimeDict[keyword] = float(value.strip(','))
# print("Done!")
return WeightDict, SensorDict, ProcessTimeDict
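# read_weight_timeConsuming() returns three OrderedDicts keyed by the text before ':':
# WeightDict maps a timestamp to a single weight value, SensorDict maps a timestamp to a
# list of raw sensor readings (typically 9 IMU values), and ProcessTimeDict maps a stage
# name to its time cost. The key semantics are assumed from how plot_sensor_curve() uses them.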
def plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict):
wtime, wdata = [], []
stime, sdata = [], []
for key, value in WeightDict.items():
wtime.append(int(key))
wdata.append(value)
for key, value in SensorDict.items():
if len(value) != 9: continue
stime.append(int(key))
sdata.append(np.array(value))
static_range = []
dynamic_range = []
windth = 8
nw = len(wdata)
    assert nw >= 8, "The number of weight samples is less than 8!"
# i1, i2 = 0, 7
# while i2 < nw:
# data = wdata[i1:(i2+1)]
# max(data) - min(data)
# if i2<7:
# i1 = 0
# else:
# i1 = i2-windth
min_t = min(wtime + stime)
wtime = [t-min_t for t in wtime]
stime = [t-min_t for t in stime]
max_t = max(wtime + stime)
fig = plt.figure(figsize=(16, 12))
gs = fig.add_gridspec(2, 1, left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.05, hspace=0.15)
# ax1, ax2 = axs
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax1.plot(wtime, wdata, 'b--', linewidth=2 )
for i in range(9):
ydata = [s[i] for s in sdata]
ax2.plot(stime, ydata, linewidth=2 )
ax1.grid(True), ax1.set_xlim(0, max_t), ax1.set_title('Weight')
    ax1.set_xlabel("Time (ms)")
# ax1.legend()
ax2.grid(True), ax2.set_xlim(0, max_t), ax2.set_title('IMU')
# ax2.legend()
plt.show()
def test_process(file_path):
WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(file_path)
plot_sensor_curve(WeightDict, SensorDict, ProcessTimeDict)
def main():
files_path = r'\\192.168.1.28\share\测试_202406\0814\0814\20240814-102227-62264578-a720-4eb9-b95e-cb8be009aa98_null'
k = 0
for filename in os.listdir(files_path):
filename = 'process.data'
file_path = os.path.join(files_path, filename)
if os.path.isfile(file_path) and filename.find("track.data")>0:
extract_data(file_path)
if os.path.isfile(file_path) and filename.find("process.data")>=0:
test_process(file_path)
k += 1
if k == 1:
break
def main1():
fpath = r'\\192.168.1.28\share\测试_202406\1101\images\20241101-140456-44dc75b5-c406-4cb2-8317-c4660bb727a3_6922130101355_6922130101355\process.data'
simidct = read_one2one_simi(fpath)
print(simidct)
if __name__ == "__main__":
# main()
main1()