diff --git a/contrast/__pycache__/event_test.cpython-39.pyc b/contrast/__pycache__/event_test.cpython-39.pyc
index 0118aa4..4f87a3f 100644
Binary files a/contrast/__pycache__/event_test.cpython-39.pyc and b/contrast/__pycache__/event_test.cpython-39.pyc differ
diff --git a/contrast/event_test.py b/contrast/event_test.py
index c4eb5a0..8346eb8 100644
--- a/contrast/event_test.py
+++ b/contrast/event_test.py
@@ -169,9 +169,6 @@ def calc_simil(event, stdfeat):
         Similar = None  # when event.front_feats and event.back_feats are both empty
 
     return Similar
-
-
-
 def simi_matrix():
diff --git a/contrast/feat_extract/__pycache__/config.cpython-39.pyc b/contrast/feat_extract/__pycache__/config.cpython-39.pyc
index 2141ac1..d04ca13 100644
Binary files a/contrast/feat_extract/__pycache__/config.cpython-39.pyc and b/contrast/feat_extract/__pycache__/config.cpython-39.pyc differ
diff --git a/contrast/feat_extract/__pycache__/inference.cpython-39.pyc b/contrast/feat_extract/__pycache__/inference.cpython-39.pyc
index 7a0a3ca..d37e167 100644
Binary files a/contrast/feat_extract/__pycache__/inference.cpython-39.pyc and b/contrast/feat_extract/__pycache__/inference.cpython-39.pyc differ
diff --git a/contrast/feat_extract/config.py b/contrast/feat_extract/config.py
index 2fc5c78..bfc5af4 100644
--- a/contrast/feat_extract/config.py
+++ b/contrast/feat_extract/config.py
@@ -61,8 +61,9 @@ class Config:
     test_val = "D:/比对/cl"
     # test_val = "./data/test_data_100"
-    test_model = "checkpoints/best_resnet18_v12.pth"
+    # test_model = "checkpoints/best_resnet18_v12.pth"
     # test_model = "checkpoints/zhanting_res_801.pth"
+    test_model = "checkpoints/zhanting_res_abroad_8021.pth"
diff --git a/contrast/feat_extract/inference.py b/contrast/feat_extract/inference.py
index 70ff84a..5c75ed8 100644
--- a/contrast/feat_extract/inference.py
+++ b/contrast/feat_extract/inference.py
@@ -61,8 +61,17 @@ class FeatsInterface:
         batch_patches = []
         patches = []
         for i, img in enumerate(images):
-            img = img.copy()
-            patch = self.transform(img)
+            img = img.copy()
+
+            ## Pad img with black borders to build a square image new_img
+            width, height = img.size
+            new_size = max(width, height)
+            new_img = Image.new("RGB", (new_size, new_size), (0, 0, 0))
+            paste_x = (new_size - width) // 2
+            paste_y = (new_size - height) // 2
+            new_img.paste(img, (paste_x, paste_y))
+
+            patch = self.transform(new_img)
             if str(self.device) != "cpu":
                 patch = patch.to(device=self.device).half()
             else:
@@ -107,10 +116,12 @@ class FeatsInterface:
             patch = self.transform(img1)
             # patch = patch.to(device=self.device).half()
-            if str(self.device) != "cpu":
-                patch = patch.to(device=self.device).half()
-            else:
-                patch = patch.to(device=self.device)
+            # if str(self.device) != "cpu":
+            #     patch = patch.to(device=self.device).half()
+            #     patch = patch.to(device=self.device)
+            # else:
+            #     patch = patch.to(device=self.device)
+            patch = patch.to(device=self.device)
             patches.append(patch)
             if (d + 1) % self.batch_size == 0:
diff --git a/contrast/genfeats.py b/contrast/genfeats.py
index ad3d0fd..8c4a6f6 100644
--- a/contrast/genfeats.py
+++ b/contrast/genfeats.py
@@ -158,6 +158,8 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
         feature /= np.linalg.norm(feature, axis=1)[:, None]
 
+        feature_ft32 = feature.astype(np.float32)
+
         # float16
         feature_ft16 = feature.astype(np.float16)
         feature_ft16 /= np.linalg.norm(feature_ft16, axis=1)[:, None]
@@ -173,7 +175,7 @@
         ##================== float32
         stdbDict["barcode"] = barcode
         stdbDict["imgpaths"] = imgpaths
-        stdbDict["feats_ft32"] = feature
+        stdbDict["feats_ft32"] = feature_ft32
         stdbDict["feats_ft16"] = feature_ft16
         stdbDict["feats_uint8"] = feature_uint8
@@ -202,9 +204,13 @@ def gen_bcd_features(imgpath, bcdpath, featpath, bcdSet=None):
     stdfeat_infer(bcdpath, featpath, bcdSet)
 
 def main():
-    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
-    bcdpath = r"D:\exhibition\dataset\bcdpath"
-    featpath = r"D:\exhibition\dataset\feats"
+    imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v2.0_abroad\比对数据\all_base_二筛"
+    bcdpath = r"D:\exhibition\dataset\bcdpath_abroad"
+    featpath = r"D:\exhibition\dataset\feats_abroad"
+    if not os.path.exists(bcdpath):
+        os.makedirs(bcdpath)
+    if not os.path.exists(featpath):
+        os.makedirs(featpath)
 
     gen_bcd_features(imgpath, bcdpath, featpath)
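Note on the FeatsInterface change above: each crop is pasted onto a square black canvas before self.transform, presumably so the later resize does not distort the aspect ratio. A minimal standalone sketch of the same idea, using PIL only (the helper name is illustrative and not part of this repository):

    from PIL import Image

    def pad_to_square(img, fill=(0, 0, 0)):
        """Center img on a square canvas of side max(width, height), without resizing."""
        width, height = img.size
        side = max(width, height)
        canvas = Image.new("RGB", (side, side), fill)
        canvas.paste(img, ((side - width) // 2, (side - height) // 2))
        return canvas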
diff --git a/contrast/one2n_contrast.py b/contrast/one2n_contrast.py
index 83458ba..fdc6789 100644
--- a/contrast/one2n_contrast.py
+++ b/contrast/one2n_contrast.py
@@ -92,9 +92,9 @@ def simi_calc(event, o2nevt, typee=None):
         feat2 = o2nevt.front_feats
 
     '''Custom event feature selection'''
-    if typee==3:
-        feat1 = event.feats_compose
-        feat2 = o2nevt.feats_compose
+    if typee==3 and len(event.feats_compose) and len(o2nevt.feats_compose):
+        feat1 = [event.feats_compose]
+        feat2 = [o2nevt.feats_compose]
 
     if len(feat1) and len(feat2):
@@ -109,15 +109,18 @@ def one2n_pr(evtDicts, pattern=1):
     '''
     pattern:
         1: similarity recorded in process.data
-        2: compute from the features selected by the type flagged in process.data
-        3: compute from features selected in some other way
+        2: compute similarity from the features selected by the type flagged in process.data
+        3: compute similarity from features selected in some other way
     '''
     tpevents, fnevents, fpevents, tnevents = [], [], [], []
     tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
-    errorFile_one2n = []
+    one2nFile, errorFile_one2n = [], []
     for evtname, event in evtDicts.items():
         evt_names, evt_barcodes, evt_similars, evt_types = [], [], [], []
+
+        if len(event.barcode)==0:
+            continue
 
         for ndict in event.one2n:
             nname = ndict["event"]
@@ -149,6 +152,13 @@
                 continue
             evt_similars.append(simival)
 
+        ## none of the oneTon entries in process.data contain the current event's barcode
+        if event.barcode not in evt_barcodes:
+            errorFile_one2n.append(evtname)
+            continue
+        else:
+            one2nFile.append(evtname)
+
         if len(evt_names)==len(evt_barcodes) and len(evt_barcodes)==len(evt_similars) \
             and len(evt_similars)==len(evt_types) and len(evt_names)>0:
@@ -166,7 +176,7 @@
                 elif bcd!=event.barcode and simi!=maxsim:
                     tnsimi.append(simi)
                     tnevents.append(evtname)
-                elif bcd!=event.barcode and simi==maxsim and event.barcode in evt_barcodes:
+                elif bcd!=event.barcode and simi==maxsim:
                     fpsimi.append(simi)
                     fpevents.append(evtname)
                 else:
@@ -187,7 +197,7 @@
         TN = sum(np.array(tnsimi) < th)
 
         PPrecise.append(TP/(TP+FP+1e-6))
-        PRecall.append(TP/(len(tpsimi)+len(fnsimi)+1e-6))
+        PRecall.append(TP/(len(one2nFile)+1e-6))
         NPrecise.append(TN/(TN+FN+1e-6))
         NRecall.append(TN/(len(tnsimi)+len(fpsimi)+1e-6))
@@ -202,23 +212,23 @@
     ax.set_ylim([0, 1])
     ax.grid(True)
     ax.set_title('1:n Precise & Recall')
-    ax.set_xlabel(f"Event Num: {len(tpsimi)+len(fnsimi)}")
+    ax.set_xlabel(f"Event Num: {len(one2nFile)}")
     ax.legend()
     plt.show()
 
     ## ============================= 1:n histogram'''
     fig, axes = plt.subplots(2, 2)
     axes[0, 0].hist(tpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
     axes[0, 0].set_xlim([-0.2, 1])
-    axes[0, 0].set_title('TP')
+    axes[0, 0].set_title(f'TP: {len(tpsimi)}')
     axes[0, 1].hist(fpsimi, bins=60, range=(-0.2, 1), edgecolor='black')
     axes[0, 1].set_xlim([-0.2, 1])
-    axes[0, 1].set_title('FP')
+    axes[0, 1].set_title(f'FP: {len(fpsimi)}')
     axes[1, 0].hist(tnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
     axes[1, 0].set_xlim([-0.2, 1])
-    axes[1, 0].set_title('TN')
+    axes[1, 0].set_title(f'TN: {len(tnsimi)}')
     axes[1, 1].hist(fnsimi, bins=60, range=(-0.2, 1), edgecolor='black')
     axes[1, 1].set_xlim([-0.2, 1])
-    axes[1, 1].set_title('FN')
+    axes[1, 1].set_title(f'FN: {len(fnsimi)}')
     plt.show()
 
     return fpevents
@@ -226,14 +236,14 @@
 def main():
     '''1. Build the event dict and save it to eventDataPath; only needs to run once '''
-    init_eventdict(eventSourcePath, stype="source")
+    init_eventdict(eventSourcePath, stype="data")
 
     '''2. Load the event dict '''
     evtDicts = read_eventdict(eventDataPath)
 
     '''3. Evaluate the 1:n comparison events '''
-    fpevents = one2n_pr(evtDicts, pattern=3)
+    fpevents = one2n_pr(evtDicts, pattern=1)
 
     fpErrFile = str(Path(resultPath).joinpath("one2n_fp_Error.txt"))
     with open(fpErrFile, "w") as file:
@@ -243,15 +253,16 @@
 
 if __name__ == '__main__':
-    eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\ShoppingDict_pkfile"
-    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\contrast"
+    eventSourcePath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\比对数据"
+    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\testing"
 
     eventDataPath = os.path.join(resultPath, "evtobjs")
-    similPath = os.path.join(resultPath, "simidata")
 
     if not os.path.exists(eventDataPath):
        os.makedirs(eventDataPath)
-    if not os.path.exists(similPath):
-        os.makedirs(similPath)
+
+    # similPath = os.path.join(resultPath, "simidata")
+    # if not os.path.exists(similPath):
+    #     os.makedirs(similPath)
 
     main()
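Note on one2n_pr() above: positive recall is now normalized by len(one2nFile), the number of retained events whose own barcode appears in the 1:n records, rather than by len(tpsimi)+len(fnsimi). A compact sketch of the threshold sweep as it now reads (names are illustrative; the FP/FN comparisons are assumed to mirror the TP/TN lines visible in the hunk):

    import numpy as np

    def pr_sweep(tpsimi, fpsimi, tnsimi, fnsimi, n_events, thresholds=np.linspace(0.1, 1.0, 91)):
        """Positive/negative precision and recall over similarity thresholds."""
        pprecise, precall, nprecise, nrecall = [], [], [], []
        for th in thresholds:
            tp = np.sum(np.asarray(tpsimi) >= th)
            fp = np.sum(np.asarray(fpsimi) >= th)
            tn = np.sum(np.asarray(tnsimi) < th)
            fn = np.sum(np.asarray(fnsimi) < th)
            pprecise.append(tp / (tp + fp + 1e-6))
            precall.append(tp / (n_events + 1e-6))   # denominator: total retained events
            nprecise.append(tn / (tn + fn + 1e-6))
            nrecall.append(tn / (len(tnsimi) + len(fpsimi) + 1e-6))
        return np.array(pprecise), np.array(precall), np.array(nprecise), np.array(nrecall)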
diff --git a/contrast/one2one_contrast.py b/contrast/one2one_contrast.py
index 8ccc34e..db02389 100644
--- a/contrast/one2one_contrast.py
+++ b/contrast/one2one_contrast.py
@@ -626,12 +626,13 @@ if __name__ == '__main__':
     # eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1129_展厅模型v801测试组测试"]
 
-    stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\比对数据\barcode\all_totalBarocde\totalBarcode"
-    stdBarcodePath = r"D:\全实时\source_data\bcdpath"
-    stdFeaturePath = r"D:\全实时\source_data\stdfeats"
+    stdSamplePath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v2.0_abroad\比对数据\all_base_二筛"
+    stdBarcodePath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\testing\bcdpath"
+    stdFeaturePath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\testing\stdfeats"
 
-    eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\ShoppingDict_pkfile"]
-    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\result\contrast"
+    eventSourcePath = [r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\比对数据"]
+
+    resultPath = r"\\192.168.1.28\share\测试视频数据以及日志\海外展厅测试数据\testing\evtobjs"
 
     eventDataPath = os.path.join(resultPath, "evtobjs")
     similPath = os.path.join(resultPath, "simidata")
     if not os.path.exists(eventDataPath):
@@ -639,7 +640,7 @@
     if not os.path.exists(similPath):
         os.makedirs(similPath)
 
-    # test_one2one()
+    test_one2one()
 
     test_one2SN()
diff --git a/contrast/onsite_contrast_pr.py b/contrast/onsite_contrast_pr.py
index 5786d93..f774d4f 100644
--- a/contrast/onsite_contrast_pr.py
+++ b/contrast/onsite_contrast_pr.py
@@ -495,7 +495,7 @@ def contrast_pr(paths):
 
 if __name__ == "__main__":
-    evtpaths = r"D:\全实时\source_data\2024122416"
+    evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\算法全流程测试\202412\images"
 
     contrast_pr(evtpaths)
diff --git a/contrast/utils/__pycache__/event.cpython-39.pyc b/contrast/utils/__pycache__/event.cpython-39.pyc
index ab0537d..4627a1a 100644
Binary files a/contrast/utils/__pycache__/event.cpython-39.pyc and b/contrast/utils/__pycache__/event.cpython-39.pyc differ
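Note on the directory handling above: the __main__ blocks create their output folders with the `if not os.path.exists(...): os.makedirs(...)` pair, as genfeats.main() now does as well. `os.makedirs` also accepts `exist_ok=True`, which folds the check into the call; a tiny sketch with a placeholder path, not one of the repository's share paths:

    import os

    result_path = r"D:\testing"  # placeholder path
    for sub in ("evtobjs", "simidata"):
        os.makedirs(os.path.join(result_path, sub), exist_ok=True)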
diff --git a/contrast/utils/event.py b/contrast/utils/event.py
index df25689..4cb7367 100644
--- a/contrast/utils/event.py
+++ b/contrast/utils/event.py
@@ -202,10 +202,10 @@ class ShoppingEvent:
             self.front_trackingfeats = frontdata[5]
 
         '''=========== corresponds to 0/1_tracking_output.data ============================='''
-        self.back_boxes = back_outdata[0]
-        self.back_feats = back_outdata[1]
-        self.front_boxes = front_outdata[0]
-        self.front_feats = front_outdata[1]
+        self.back_boxes = back_outdata
+        self.back_feats = back_outdata
+        self.front_boxes = front_outdata
+        self.front_feats = front_outdata
 
     def from_datafile(self, eventpath):
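Note on the ShoppingEvent change above: it stops indexing back_outdata / front_outdata and assigns the same object to both the *_boxes and *_feats attributes. If some 0/1_tracking_output.data payloads are [boxes, feats] pairs while others are flat (an assumption; the loader is not shown in this diff), a defensive unpack would keep both layouts readable:

    def unpack_tracking_output(outdata):
        """Hypothetical helper: return (boxes, feats) for either payload layout."""
        if isinstance(outdata, (list, tuple)) and len(outdata) == 2:
            return outdata[0], outdata[1]
        return outdata, outdata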
diff --git a/pipeline.py b/pipeline.py
index a2600e7..7a7d3c9 100644
--- a/pipeline.py
+++ b/pipeline.py
@@ -103,14 +103,16 @@ def pipeline(
     # if pf_path.exists():
     #     return
-
+
+
+    '''====================== Build the ShoppingDict ======================='''
    ShoppingDict = {"eventPath": eventpath,
                    "eventName": evtname,
                    "barcode": barcode,
                    "eventType": '',   # "input", "output", "other"
                    "frontCamera": {},
                    "backCamera": {},
-                   "one2n": []
+                   "one2n": []        #
                    }
@@ -121,7 +123,7 @@
 
     for vpath in vpaths:
-        '''Build the camera event dict'''
+        '''================= 1. Build the camera event dict ================='''
         CameraEvent = {"cameraType": '',   # "front", "back"
                        "videoPath": '',
                        "imagePaths": [],
@@ -140,21 +142,21 @@
         if bname.split('_')[0] == "1" or bname.find('front')>=0:
             CameraEvent["cameraType"] = "front"
 
-        '''Event result folders'''
-
+        '''================= 2. Event result folders ================='''
         if isinstance(vpath, list):
             savepath_pipeline_imgs = savepath_pipeline / Path("images")
         else:
             savepath_pipeline_imgs = savepath_pipeline / Path(str(Path(vpath).stem))
+
         if not savepath_pipeline_imgs.exists():
-            savepath_pipeline_imgs.mkdir(parents=True, exist_ok=True)
-
+            savepath_pipeline_imgs.mkdir(parents=True, exist_ok=True)
+
         savepath_pipeline_subimgs = savepath_pipeline / Path("subimgs")
         if not savepath_pipeline_subimgs.exists():
             savepath_pipeline_subimgs.mkdir(parents=True, exist_ok=True)
 
-        '''Yolo + Resnet + Tracker'''
+        '''================= 3. Yolo + Resnet + Tracker ================='''
         optdict["source"] = vpath
         optdict["save_dir"] = savepath_pipeline_imgs
@@ -162,11 +164,8 @@
 
         CameraEvent["yoloResnetTracker"] = yrtOut
 
-
-        # bboxes = np.empty((0, 9), dtype = np.float32)
-        # for frameDict in yrtOut:
-        #     bboxes = np.concatenate([bboxes, frameDict["tboxes"]], axis=0)
-
+        '''================= 4. tracking ================='''
+        '''(1) Build the boxes and feats used by the tracking module'''
         trackerboxes = np.empty((0, 9), dtype=np.float64)
         trackefeats = {}
         for frameDict in yrtOut:
@@ -179,7 +178,7 @@
                 trackefeats.update({f"{fid}_{bid}": ffeats[f"{fid}_{bid}"]})
 
-        '''tracking'''
+        '''(2) tracking, back camera'''
         if CameraEvent["cameraType"] == "back":
             vts = doBackTracks(trackerboxes, trackefeats)
             vts.classify()
@@ -187,7 +186,8 @@
             CameraEvent["tracking"] = vts
             ShoppingDict["backCamera"] = CameraEvent
-
+
+        '''(2) tracking, front camera'''
         if CameraEvent["cameraType"] == "front":
             vts = doFrontTracks(trackerboxes, trackefeats)
             vts.classify()
@@ -196,11 +196,12 @@
             CameraEvent["tracking"] = vts
             ShoppingDict["frontCamera"] = CameraEvent
 
-
+    '''========================== Saving ================================='''
+    '''(1) Save the ShoppingDict of the event'''
     with open(str(pf_path), 'wb') as f:
         pickle.dump(ShoppingDict, f)
-
-
+
+    '''(2) Save the track sub-images output by Tracking and record similarities'''
     for CamerType, vts in event_tracks:
         if len(vts.tracks)==0: continue
         if CamerType == 'front':
@@ -215,16 +216,14 @@
                 imgdict.update(y["imgs"])
                 featdict.update(y["feats"])
                 simidict.update(y["featsimi"])
-
-
+
         for track in vts.Residual:
             if isinstance(track, np.ndarray):
                 save_subimgs(imgdict, track, savepath_pipeline_subimgs, ctype, featdict)
             else:
                 save_subimgs(imgdict, track.slt_boxes, savepath_pipeline_subimgs, ctype, featdict)
-
-    '''Trajectory display module'''
+
+    '''(3) Display and save the trajectories'''
     illus = [None, None]
     for CamerType, vts in event_tracks:
         if len(vts.tracks)==0: continue
@@ -279,15 +278,15 @@ def main():
         if item.is_dir():
             # item = evtdir/Path("20241209-160201-b97f7a0e-7322-4375-9f17-c475500097e9_6926265317292")
             parmDict["eventpath"] = item
-            pipeline(**parmDict)
+            # pipeline(**parmDict)
 
-            # try:
-            #     pipeline(**parmDict)
-            # except Exception as e:
-            #     errEvents.append(str(item))
-            k+=1
-            if k==2:
-                break
+            try:
+                pipeline(**parmDict)
+            except Exception as e:
+                errEvents.append(str(item))
+            # k+=1
+            # if k==100:
+            #     break
 
     errfile = os.path.join(parmDict["savepath"], f'error_events.txt')
     with open(errfile, 'w', encoding='utf-8') as f:
diff --git a/track_reid.py b/track_reid.py
index 9a3f029..0c1843a 100644
--- a/track_reid.py
+++ b/track_reid.py
@@ -216,7 +216,7 @@ def yolo_resnet_tracker(
                 # Rescale boxes from img_size to im0 size
                 det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
 
-                # det = det.cpu().numpy()
+                det = det.cpu().numpy()
                 ## ================================================================ writed by WQG
                 '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
                             0   1   2   3   4         5      6    7            8
diff --git a/tracking/module_analysis.py b/tracking/module_analysis.py
index 4c653f2..8318163 100644
--- a/tracking/module_analysis.py
+++ b/tracking/module_analysis.py
@@ -2,7 +2,10 @@
 """
 Created on Thu May 30 14:03:03 2024
 
-On-site test performance analysis
+Trajectory analysis for on-site test performance:
+(1) Read the track data recorded in the data files and plot the trajectories
+(2) Read the data from a local Yolo+Resnet+Tracker+Tracking run and plot the trajectories
+
 @author: ym
 """
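Note on pipeline.main() above: each event is now wrapped in try/except and failing event paths are recorded instead of aborting the batch (the earlier counter/break debug code is commented out). A self-contained sketch of that batch pattern; pipeline_fn and the folder layout are stand-ins, not the repository's API:

    import os
    from pathlib import Path

    def run_all_events(evtdir, savepath, pipeline_fn):
        """Run pipeline_fn on every event folder; collect failures, never abort the batch."""
        err_events = []
        for item in Path(evtdir).iterdir():
            if not item.is_dir():
                continue
            try:
                pipeline_fn(eventpath=item, savepath=savepath)
            except Exception:
                err_events.append(str(item))
        with open(os.path.join(savepath, "error_events.txt"), "w", encoding="utf-8") as f:
            f.write("\n".join(err_events))
        return err_events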