# detecttracking/realtime/time_devide.py
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 17:37:07 2024
@author: ym
"""
# import csv
import os
# import platform
# import sys
from pathlib import Path
import glob
import numpy as np
import copy
import matplotlib.pyplot as plt
from collections import OrderedDict
from event_time_specify import devide_motion_state #, state_measure
import sys
sys.path.append(r"D:\DetectTracking")
from imgs_inference import run_yolo
from tracking.utils.read_data import read_weight_sensor
# IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
# VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
def filesort(p):
    '''
    Image file names need to be standardized: frames are expected to be named
    "<prefix>_<cameraId>_<timestamp>.jpg". Split them by camera id (0: back,
    1: front) and sort each group by timestamp.
    '''
    files = []
    files.extend(sorted(glob.glob(os.path.join(p, '*.jpg'))))
    # images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]

    tamps_0, tamps_1 = [], []
    files_0, files_1 = [], []
    for file in files:
        basename = os.path.basename(file)
        # if basename.find('frameId')<0: continue

        f, ext = os.path.splitext(basename)
        _, camer, tamp = f.split('_')
        if camer == '0':
            tamps_0.append(int(tamp))
            files_0.append(file)
        if camer == '1':
            tamps_1.append(int(tamp))
            files_1.append(file)

    idx0 = sorted(range(len(tamps_0)), key=lambda k: tamps_0[k])
    files0 = [files_0[i] for i in idx0]

    idx1 = sorted(range(len(tamps_1)), key=lambda k: tamps_1[k])
    files1 = [files_1[i] for i in idx1]

    return (files0, files1)
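
# Minimal usage sketch (hypothetical path), assuming the frame-naming convention above:
# back_files, front_files = filesort(r"D:\events\1731316835560")
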
def rename(filePath, tmin):
    """
    Input: folder path and the event start timestamp tmin.
    Function: append the relative timestamp (timestamp - tmin) to the name of
    every .png file in the folder.
    """
    suffix = '.png'   # only rename files with this extension
    for file in os.listdir(filePath):
        if file.endswith(suffix):
            name, ext = os.path.splitext(file)
            tamp = int(name.split('_')[2]) - tmin
            newname = name + f'_{int(tamp)}' + ext
            os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
def rerename(filePath=None):
    """
    Input: folder path.
    Function: strip the extra trailing fields from the .png file names --
    if a name has six or more '_'-separated fields, keep only the first five.
    """
    suffix = '.png'   # only rename files with this extension
    for file in os.listdir(filePath):
        if file.endswith(suffix):
            name = os.path.splitext(file)[0]
            names = name.split('_')
            if len(names) >= 6:
                newname = "_".join(names[0:5]) + '.png'
                os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
def state_measure(periods, weights, hands, spath=None):
    '''
    Data types:
        back camera: 0, front camera: 1, fused CV: 2, weight (scale): 9

    frstate_0/1 columns:
        frame ID | timestamp | per-camera state 1 | per-camera state 2 | track activity flags
            0          1               2                    3                  4:end

    time_stream = np.concatenate((tstream, weights)), columns:
        0: sequence index
        1: data type
        2: timestamp
        3: per-camera state 1 / weight value
        4: per-camera state 2 / weight (t0, t1)
        5: CV state / weight (t0', t1')
        6: fused data type
        7: fused state

    Per-camera state 1: motion interval determined by the start/end points of the motion tracks.
    Per-camera state 2: motion interval determined by the start/end points of a sliding window
                        (window end point).
    Weight (t0, t1):   exact time interval of the weight fluctuation, based on the start/end
                       points of the fluctuation rather than only on the weight-settling time.
    Weight (t0', t1'): weight-fluctuation window extended according to put-in / take-out, so
                       that the extension covers the whole shopping event.

    Scheme:
        OR the front- and back-camera states;
        AND the fused CV state with the weight state.
    '''
    # BackType = 0          # back-camera data type
    # FrontType = 1         # front-camera data type
    CameraType = 2          # fused CV data type
    HandType = 3            # hand data type
    WeightType = 9          # weight (scale) data type
    WeightStableThresh = 7.5   # g, max fluctuation still considered a stable weight
    WeightWinWidth = 10        # number of weight samples; tied to the sampling interval,
                               # settling time = 500 ms = WeightWinWidth * sampling interval
    CameraTimeInterval = 100   # ms, time threshold when OR-ing front/back camera states
    InputFrontNum = 10         # weight increase (put-in): samples to extend before the fluctuation
    InputBackNum = 0           # weight increase (put-in): samples to extend after the fluctuation
    OutputFrontNum = 2         # weight decrease (take-out): samples to extend before the fluctuation
    OutputBackNum = 10         # weight decrease (take-out): samples to extend after the fluctuation
    CompTimeInterval = 150     # ms, time threshold when AND-ing the CV state with the weight state

    '''==================== 1.1 Merge and sort the weight data ======================='''
    nw = len(weights)
    widx = np.array([k for k in range(0, nw)])[:, None]
    wtype = WeightType * np.ones((nw, 1))
    wstate = np.zeros((nw, 4))

    weights = np.concatenate((widx, wtype, weights, wstate), axis=1).astype(np.int64)
    weights[:, 6] = WeightType
    weights = weights[np.argsort(weights[:, 2]), :]

    '''=================== 1.2 Weight-based state segmentation ========================='''
    w_max = np.max(weights[:, 3])
    # i0=0
    for i2 in range(0, nw):
        i1 = max(i2 - WeightWinWidth, 0)
        wvalue = weights[i1:i2+1, 3]
        wi2 = weights[i2, 3]
        wmin = np.min(wvalue)
        wmax = np.max(wvalue)

        '''Mark the weight-fluctuation interval, and record the index and value of the
           most recent stable weight sample.'''
        if wmax - wmin > WeightStableThresh:
            weights[i2, 4] = w_max

        if i2==0:
            i0 = 0
            wi0 = weights[i0, 3]
        elif i2>0 and weights[i2-1, 4]==0:
            i0 = copy.deepcopy(i2)
            wi0 = weights[i0, 3]

        if i2>0 and weights[i2-1, 4]!=0 and weights[i2, 4]==0:
            # The difference between the current stable weight and the previous stable
            # weight decides whether the event is a put-in or a take-out.
            if wi2-wi0 > WeightStableThresh:
                i00 = max(i0 - InputFrontNum, 0)
                i22 = min(i2 + InputBackNum, nw)
            elif wi2-wi0 < -1*WeightStableThresh:
                i00 = max(i0 - OutputFrontNum, 0)
                i22 = min(i2 + OutputBackNum, nw)
            else:
                i00 = max(i0 - max(InputFrontNum, OutputFrontNum), 0)
                i22 = min(i2 + max(InputBackNum, OutputBackNum), nw)

            weights[i00:i22, 5] = w_max + 100
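
    # Worked example (hypothetical numbers): with WeightStableThresh = 7.5 g, a jump from a
    # stable 500 g to a stable 730 g counts as a put-in (wi2 - wi0 > 7.5), so the fluctuation
    # window [i0, i2] is extended by InputFrontNum = 10 samples before i0 and InputBackNum = 0
    # samples after i2, and column 5 of those rows is set to w_max + 100 as a plot marker.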

    '''===================== 2.1 Merge and sort the CV data =========================='''
    BackType, frstate_0 = periods[0]
    FrontType, frstate_1 = periods[1]
    n0, n1 = len(frstate_0), len(frstate_1)

    idx0 = np.array([i for i in range(0, n0)], dtype=np.int64)[:, None]
    idx1 = np.array([i for i in range(0, n1)], dtype=np.int64)[:, None]
    ctype0 = BackType * np.ones((n0, 1), dtype=np.int64)
    ctype1 = FrontType * np.ones((n1, 1), dtype=np.int64)
    tstamp0 = frstate_0[:,1][:, None]
    tstamp1 = frstate_1[:,1][:, None]
    state0 = frstate_0[:,2][:, None]
    state00 = frstate_0[:,3][:, None]
    state1 = frstate_1[:,2][:, None]
    state11 = frstate_1[:,3][:, None]

    '''Columns 0-7: sequence index, camera type, timestamp, per-camera state 1,
       per-camera state 2, fused CV state, fused data type, fused state'''
    tstream0 = np.concatenate((idx0, ctype0, tstamp0, state0, state00), axis=1)
    tstream1 = np.concatenate((idx1, ctype1, tstamp1, state1, state11), axis=1)
    tstream = np.concatenate((tstream0, tstream1), axis=0)
    tstream = np.concatenate((tstream, np.zeros((len(tstream), 3), dtype=np.int64)), axis=1)
    tstream[:, 6] = CameraType
    tstream = tstream[np.argsort(tstream[:, 2]), :]

    '''=============== 2.2 Fused CV state from the motion-track start/end points of both cameras ============'''
    for i in range(0, len(tstream)):
        idx, ctype, stamp, state = tstream[i, :4]
        if i==0:
            tstream[i, 5] = state
        if i>0:
            j = i-1
            idx0, ctype0, stamp0, state0 = tstream[j, :4]
            while stamp-stamp0 < CameraTimeInterval and ctype == ctype0 and j>0:
                j -= 1
                idx0, ctype0, stamp0, state0 = tstream[j, :4]

            '''OR of the two camera states. Since the front and back frames are not simultaneous,
               the key is choosing the alignment point between the cameras: at time i the camera
               (ctype) has state `state`; the other camera (ctype0 != ctype) contributes state0
               at time j, the sample closest to i.'''
            if ctype != ctype0 and state0==1:
                tstream[i, 5] = state0
            else:
                tstream[i, 5] = state
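
    # Example (hypothetical values): if the back camera reports state 0 at some timestamp and
    # the nearest earlier sample from the front camera has state 1, the fused CV state
    # (column 5) at that timestamp becomes 1 -- the OR of the two cameras.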

    '''================ 3.1 Merge and sort the CV and Weight data ======================'''
    time_stream = np.concatenate((tstream, weights), axis=0, dtype=np.int64)
    time_stream = time_stream[np.argsort(time_stream[:, 2]), :]

    tmin = np.min(time_stream[:, 2])
    time_stream[:, 2] = time_stream[:, 2] - tmin

    '''============== 3.2 Fused cart state from the CV and Weight states ============'''
    for i in range(0, len(time_stream)):
        idx, _, stamp, value, _, state, ctype = time_stream[i, :7]
        state = min(state, 1)
        if i==0:
            time_stream[i, 7] = state
        if i>0:
            j = i-1
            idx0, _, stamp0, value0, _, state0, ctype0 = time_stream[j, :7]
            while stamp-stamp0 < CompTimeInterval and ctype == ctype0 and j>0:
                j -= 1
                idx0, _, stamp0, value0, _, state0, ctype0 = time_stream[j, :7]

            '''AND of the CV and Weight states. Since the CV and Weight samples are not
               simultaneous, the key is choosing the alignment point between the data sources:
               at time i the source (ctype) has state `state`; the other source (ctype0 != ctype)
               contributes state0 at time j, the sample closest to i.'''
            if ctype != ctype0 and state !=0 and state0 !=0:
                time_stream[i, 7] = 1

    MotionSlice, motion_slice = [], []
    t0 = time_stream[0, 2]      # start time of the current motion slice
    for i in range(1, len(time_stream)):
        f0 = time_stream[i-1, 7]
        f1 = time_stream[i, 7]
        if f0==0 and f1==1:
            t0 = time_stream[i, 2]
        elif f0==1 and f1==0:
            t1 = time_stream[i, 2]
            if t1-t0>100:    # ms
                MotionSlice.append((t0+tmin, t1+tmin))
                motion_slice.append((t0, t1))
            else:
                print(f"T0: {t0}, T1: {t1}")

    '''===================== 4. Merge and sort the Hand data =========================='''
    BackType, hdstate_0 = hands[0]
    FrontType, hdstate_1 = hands[1]
    n0, n1 = len(hdstate_0), len(hdstate_1)

    idx0 = np.array([i for i in range(0, n0)], dtype=np.int64)[:, None]
    idx1 = np.array([i for i in range(0, n1)], dtype=np.int64)[:, None]
    ctype0 = BackType * np.ones((n0, 1), dtype=np.int64)
    ctype1 = FrontType * np.ones((n1, 1), dtype=np.int64)
    hstamp0 = hdstate_0[:,1][:, None]
    hstamp1 = hdstate_1[:,1][:, None]
    state0 = hdstate_0[:,2][:, None]
    state1 = hdstate_1[:,2][:, None]

    '''Columns 0-7: sequence index, camera type, timestamp, per-camera hand state,
       fused hand state, reserved, fused data type, fused state'''
    hstream0 = np.concatenate((idx0, ctype0, hstamp0, state0), axis=1)
    hstream1 = np.concatenate((idx1, ctype1, hstamp1, state1), axis=1)
    hstream = np.concatenate((hstream0, hstream1), axis=0)
    hstream = np.concatenate((hstream, np.zeros((len(hstream), 4), dtype=np.int64)), axis=1)
    hstream[:, 6] = HandType
    hstream = hstream[np.argsort(hstream[:, 2]), :]

    for i in range(0, len(hstream)):
        idx, ctype, stamp, state = hstream[i, :4]
        if i==0:
            hstream[i, 4] = state
        if i>0:
            j = i-1
            idx0, ctype0, stamp0, state0 = hstream[j, :4]
            while stamp-stamp0 < CameraTimeInterval and ctype == ctype0 and j>0:
                j -= 1
                idx0, ctype0, stamp0, state0 = hstream[j, :4]

            '''OR of the two cameras' hand states, aligned the same way as in section 2.2:
               at time i the camera (ctype) has state `state`; the other camera (ctype0 != ctype)
               contributes state0 at time j, the sample closest to i.'''
            if ctype != ctype0 and state0==2:
                hstream[i, 4] = state0
            elif ctype != ctype0 and state0==1:
                hstream[i, 4] = state0
            else:
                hstream[i, 4] = state

    '''========================== 5. Plot the results ================================'''
    frstate_0[:, 1] = frstate_0[:, 1]-tmin
    frstate_1[:, 1] = frstate_1[:, 1]-tmin
    tstream[:, 2] = tstream[:, 2]-tmin

    fig, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(6, 1)
    during = np.max(time_stream[:, 2])

    ax1.plot(weights[:, 2]-tmin, weights[:, 3], 'bo-', linewidth=1, markersize=4)
    ax1.plot(weights[:, 2]-tmin, weights[:, 4], 'mx-', linewidth=1, markersize=4)
    ax1.plot(weights[:, 2]-tmin, weights[:, 5], 'gx-', linewidth=1, markersize=4)
    ax1.set_xlim([0, during])
    ax1.set_title('Weight (gram)')

    ax2.plot(frstate_0[:, 1], frstate_0[:, 3], 'rx-', linewidth=1, markersize=8)
    ax2.plot(frstate_0[:, 1], frstate_0[:, 2], 'bo-', linewidth=1, markersize=4)
    ax2.set_xlim([0, during])
    ax2.set_title('Back Camera')

    ax3.plot(frstate_1[:, 1], frstate_1[:, 3], 'rx-', linewidth=1, markersize=8)
    ax3.plot(frstate_1[:, 1], frstate_1[:, 2], 'bo-', linewidth=1, markersize=4)
    ax3.set_xlim([0, during])
    ax3.set_title('Front Camera')

    ax4.plot(tstream[:, 2], tstream[:, 5], 'bx-', linewidth=1, markersize=4)
    ax4.set_xlim([0, during])
    ax4.set_title('CV State')

    ax5.plot(time_stream[:, 2], time_stream[:, 7], 'gx-', linewidth=1, markersize=4)
    ax5.set_xlim([0, during])
    ax5.set_title('Cart State')

    ax6.plot(hstream[:, 2]-tmin, hstream[:, 4], 'gx-', linewidth=1, markersize=4)
    ax6.set_xlim([0, during])
    ax6.set_title('Hand State')

    if spath:
        plt.savefig(spath)   # save before show(), which clears the current figure
    plt.show()

    return tmin, MotionSlice
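
# Minimal usage sketch (hypothetical inputs): `periods` and `hands` are lists of
# (cameraType, array) pairs for the back (0) and front (1) cameras, `weights` is an
# (N, 2) array of (timestamp_ms, grams) rows, and `spath` is an optional figure path.
# tmin, MotionSlice = state_measure(periods, weights, hands, spath="event.png")
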
def splitevent(imgpath, MotionSlice):
    suffix = '.png'

    imgfiles = [f for f in os.listdir(imgpath) if f.endswith(suffix)]
    timestamp = np.array([int(f.split('_')[2]) for f in imgfiles])

    indexes = []
    k = 0
    for t0, t1 in MotionSlice:
        idx0 = set(np.where(timestamp >= t0)[0])
        idx1 = set(np.where(timestamp <= t1)[0])
        idx2 = list(idx0.intersection(idx1))

        files = [imgfiles[i] for i in idx2]
        for filename in files:
            file, ext = os.path.splitext(filename)
            newname = file + f'_{k}.png'
            os.rename(os.path.join(imgpath, filename), os.path.join(imgpath, newname))

        k += 1

    print("Done!")
def runyolo():
    eventdirs = r"\\192.168.1.28\share\个人文件\wqg\realtime\eventdata"
    savedir = r"\\192.168.1.28\share\个人文件\wqg\realtime\result"

    k = 0
    for edir in os.listdir(eventdirs):
        edir = "1731316835560"      # debug override: process only this event
        source = os.path.join(eventdirs, edir)
        files = filesort(source)
        for flist in files:
            run_yolo(flist, savedir)

        k += 1
        if k==1:
            break

def run_tracking(trackboxes, MotionSlice):
    pass
def read_wsensor(filepath):
    WeightDict = OrderedDict()
    with open(filepath, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]

    for i, line in enumerate(clean_lines):
        line = line.strip()
        if line.find(':') < 0: continue

        # if line.find("Weight") >= 0:
        #     label = "Weight"
        #     continue

        keyword = line.split(':')[0]
        value = line.split(':')[1]

        # if label == "Weight":
        if len(keyword) and len(value):
            vdata = [float(s) for s in value.split(',') if len(s)]
            WeightDict[keyword] = vdata[-1]

    weights = [(float(t), w) for t, w in WeightDict.items()]
    weights = np.array(weights).astype(np.int64)

    return weights
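
# read_wsensor expects sensor.txt lines of the form "<timestamp>:<v1>,<v2>,..." and keeps the
# last value on each line as the weight at that timestamp. A minimal usage sketch
# (hypothetical path):
# weights = read_wsensor(r"D:\events\1731316835560\sensor.txt")   # int64 array of (t, w) rows
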
def show_seri():
    datapath = r"\\192.168.1.28\share\个人文件\wqg\realtime\eventdata\1731316835560"
    savedir = r"\\192.168.1.28\share\个人文件\wqg\realtime\1"

    imgdir = datapath.split('\\')[-2] + "_" + datapath.split('\\')[-1]
    imgpath = os.path.join(savedir, imgdir)
    eventname = Path(datapath).stem

    datafiles = sorted(glob.glob(os.path.join(datapath, '*.npy')))
    periods, trackboxes, hands = [], [], []
    win_width = 12
    for npypath in datafiles:
        CameraType = Path(npypath).stem.split('_')[-1]
        tkboxes = np.load(npypath)
        trackboxes.append((CameraType, tkboxes))

        period, handState = devide_motion_state(tkboxes, win_width)

        periods.append((int(CameraType), period))
        hands.append((int(CameraType), handState))

    '''=============== Read the weight-sensor data ==================='''
    seneorfile = os.path.join(datapath, 'sensor.txt')
    weights = read_wsensor(seneorfile)

    # weights = [(float(t), w) for t, w in WeightDict.items()]
    # weights = np.array(weights)

    '''=============== Fuse the weight and image information ==================='''
    spath = os.path.join(savedir, f"{eventname}.png")
    tmin, MotionSlice = state_measure(periods, weights, hands, spath)

    # Used on the first run to rewrite the image file names
    # rerename(imgpath)
    # rename(imgpath, tmin)
    # splitevent(imgpath, MotionSlice)
def main():
    # runyolo()
    show_seri()

if __name__ == '__main__':
    main()

    # imgpaths = r"\\192.168.1.28\share\realtime\result\eventdata_1728978106733"
    # rerename(imgpaths)