When 轨迹数为空.txt is given, iterate over the event names in the file and re-run the pipeline to write the track counts

jiajie555
2025-04-18 15:17:46 +08:00
parent 010f5c445a
commit b0ce11f987
2 changed files with 43 additions and 17 deletions

Changed file 1 of 2

@@ -142,13 +142,15 @@ def show_result(eventpath, event_tracks, yrtDict, savepath_pipe):
 def pipeline(dict_data,
+             pickle_exist,
              eventpath,
              SourceType,
              weights,
              DataType = "raw",  # "raw": images or videos; "pkl": a saved pickle file
              YoloVersion="V5",
              savepath = None,
-             saveimages = True
+             saveimages = True,
              ):
     ## build the shopping-event dict
@@ -172,10 +174,11 @@ def pipeline(dict_data,
     yrt_out = []
     if DataType == "raw":
-        ### do not re-run events already processed by yolo-resnet-tracker
-        if pklpath.exists():
-            print(f"Pickle file already saved: {evtname}.pickle")
-            return
+        if not pickle_exist:
+            ### do not re-run events already processed by yolo-resnet-tracker
+            if pklpath.exists():
+                print(f"Pickle file already saved: {evtname}.pickle")
+                return
 
         if SourceType == "video":
             vpaths = get_video_pairs(eventpath)
@ -441,7 +444,8 @@ def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' , weight_yolo_v5 = r'./ckpts/best_cls10_0906.pt' ,
weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt', weight_yolo_v10 = r'./ckpts/best_v10s_width0375_1205.pt',
saveimages = True, saveimages = True,
max_col = 12 max_col = 12,
track_txt = ''
): ):
''' '''
运行函数 pipeline(),遍历事件文件夹,每个文件夹是一个事件 运行函数 pipeline(),遍历事件文件夹,每个文件夹是一个事件
@@ -481,18 +485,36 @@ def execute_pipeline(evtdir = r"D:\datasets\ym\后台数据\unzip",
         if csv_data == '':
             with open('no_datacsv.txt', 'a') as f:
                 f.write(str(date_file) + '\n')
-        for item in date_file.iterdir():
-            # dict_data = {}
-            if item.is_dir():
-                # item = evtdir/Path("20241212-171505-f0afe929-fdfe-4efa-94d0-2fa748d65fbb_6907992518930")
-                parmDict["eventpath"] = item
-                event_name = str(item.name)
-                dict_data = get_process_csv_data(csv_data, item)
-                print('dict_data', dict_data)
-                dict_data_all = pipeline(dict_data, **parmDict)
-                if dict_data_all is not None:  # events whose pickle was already saved return None
-                    # print('dict_data_all', dict_data_all)
-                    excelWriter.write_simi_add(wb, ws, sheet, max_col, event_name, dict_data_all, headers, excel_name)
+        if len(track_txt) == 0:  ## no track_txt: walk every folder under date_file
+            pickle_exist = False
+            for item in date_file.iterdir():
+                # dict_data = {}
+                if item.is_dir():
+                    # item = evtdir/Path("20241212-171505-f0afe929-fdfe-4efa-94d0-2fa748d65fbb_6907992518930")
+                    parmDict["eventpath"] = item
+                    event_name = str(item.name)
+                    dict_data = get_process_csv_data(csv_data, item)
+                    dict_data_all = pipeline(dict_data, pickle_exist, **parmDict)
+                    if dict_data_all is not None:  # events whose pickle was already saved return None
+                        # print('dict_data_all', dict_data_all)
+                        excelWriter.write_simi_add(wb, ws, sheet, max_col, event_name, dict_data_all, headers, excel_name)
+        else:  ## track_txt given: process only the events listed in it
+            pickle_exist = True  ## flag: do not check whether the pickle file exists
+            txt_path = os.path.join(date_file, track_txt)
+            with open(txt_path, 'r') as f:
+                events = f.readlines()
+            events = [i.strip() for i in events]
+            for event in events:
+                item = date_file / event
+                parmDict["eventpath"] = item
+                event_name = str(item.name)
+                dict_data = get_process_csv_data(csv_data, item)
+                dict_data_all = pipeline(dict_data, pickle_exist, **parmDict)
+                if dict_data_all is not None:  # events whose pickle was already saved return None
+                    # print('dict_data_all', dict_data_all)
+                    excelWriter.write_simi_add(wb, ws, sheet, max_col, event_name, dict_data_all, headers,
+                                               excel_name)
 
         # try:
         #     pipeline(**parmDict)
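The new else branch expects track_txt to contain one event-folder name per line. A standalone sketch of that parsing step, with read_event_list as a hypothetical name (the commit inlines the loop):

    from pathlib import Path

    def read_event_list(date_file: Path, track_txt: str) -> list:
        # One event-folder name per line, whitespace stripped. Blank lines are
        # dropped here; the committed list comprehension keeps them, and a kept
        # empty string would make date_file / '' resolve to date_file itself.
        with open(date_file / track_txt, 'r') as f:
            return [line.strip() for line in f if line.strip()]

Note also that, unlike the directory-walk branch, this loop has no item.is_dir() check, so a listed event whose folder has since been removed is still handed to pipeline().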
@@ -516,6 +538,7 @@ if __name__ == "__main__":
     datapath = '/home/yujia/yj/gpu_code/callback_data_test_0417/'
     savepath = '/home/yujia/yj/gpu_code/result_0417_v10/'
     max_col = 12  ## Excel column index is 0-based; parsed results are written starting at this column
+    track_txt = '轨迹数为空.txt'
 
     execute_pipeline(evtdir=datapath,
                      DataType = "raw",  # raw, pkl
@@ -526,7 +549,8 @@ if __name__ == "__main__":
                      weight_yolo_v5 = '/home/yujia/yj/gpu_code/ckpts/best_cls10_0906.pt',
                      weight_yolo_v10 = '/home/yujia/yj/gpu_code/ckpts/best_v10s_width0375_1205.pt',
                      saveimages = False,
-                     max_col = max_col
+                     max_col = max_col,
+                     track_txt = track_txt
                      )
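Taken together, the two changed files support a two-pass workflow. A sketch under assumptions (the import paths, the xlsx file name, and the omitted keyword arguments are not shown in this diff):

    # Pass 1: normal run over every event folder; per-event results, including
    # empty track counts, are written to the Excel report via excelWriter.
    execute_pipeline(evtdir='/home/yujia/yj/gpu_code/callback_data_test_0417/',
                     track_txt='')  # other kwargs as in __main__ above

    # Filter step: anlay_xlsx_filter_events (second file in this commit) writes
    # 轨迹数为空.txt, listing events whose 轨迹数 column is empty.
    anlay_xlsx_filter_events('result_0417.xlsx', name='轨迹数')  # xlsx name assumed

    # Pass 2: re-run only the listed events; inside execute_pipeline this sets
    # pickle_exist=True, which disables the pickle-exists short-circuit.
    execute_pipeline(evtdir='/home/yujia/yj/gpu_code/callback_data_test_0417/',
                     track_txt='轨迹数为空.txt')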

Changed file 2 of 2

@@ -43,11 +43,13 @@ def anlay_xlsx_filter_events(file_path, name, ch='轨迹数', classFlag=False):
     name_ = replace_str(name)
     if ch in name:
         ch_ = ch + '为空'
-        txt_path = os.path.join(date_path, date + '_' + ch_ + '.txt')
+        # txt_path = os.path.join(date_path, date + '_' + ch_ + '.txt')
+        txt_path = os.path.join(date_path, ch_ + '.txt')
         filter_df = df[(df[name].isnull())]
     else:
-        txt_path = os.path.join(date_path, date + '_' + name_ + '.txt')
+        # txt_path = os.path.join(date_path, date + '_' + name_ + '.txt')
+        txt_path = os.path.join(date_path, name_ + '.txt')
         filter_df = df[(df[name].notna()) & (df['是否购物现场'].str.contains(''))]
     try:
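Dropping the date prefix makes the output filename predictable, which is what lets execute_pipeline reference it with the single fixed track_txt value above. A minimal before/after sketch (the paths are assumptions):

    import os

    date_path = '/data/20250417'  # assumed per-date folder
    date = '20250417'
    ch_ = '轨迹数' + '为空'        # -> '轨迹数为空'

    old_path = os.path.join(date_path, date + '_' + ch_ + '.txt')  # .../20250417_轨迹数为空.txt
    new_path = os.path.join(date_path, ch_ + '.txt')               # .../轨迹数为空.txt

    # Only the new form matches the fixed name hard-coded in __main__:
    assert os.path.basename(new_path) == '轨迹数为空.txt'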