From e986ec060bfd2e0ad5e00228f8c89a15832c7b2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E5=BA=86=E5=88=9A?= Date: Thu, 18 Jul 2024 17:52:12 +0800 Subject: [PATCH] modified for site test --- data.txt | 1 + featureVal.py | 109 ++- .../detect - 快捷方式.lnk | Bin track_reid.py | 29 +- .../contrast_analysis.cpython-39.pyc | Bin 0 -> 7703 bytes tracking/contrast_analysis.py | 365 ++++++++++ tracking/data/说明文档.txt | 1 + .../__pycache__/dotracks.cpython-39.pyc | Bin 13131 -> 13177 bytes .../__pycache__/dotracks_back.cpython-39.pyc | Bin 5689 -> 5782 bytes .../__pycache__/dotracks_front.cpython-39.pyc | Bin 4497 -> 4675 bytes tracking/dotrack/dotracks.py | 7 +- tracking/dotrack/dotracks_back.py | 51 +- tracking/dotrack/dotracks_front.py | 40 +- tracking/goodmatch.py | 45 +- tracking/module_analysis.py | 390 +++++++++++ tracking/rename.py | 35 + tracking/test_tracking.py | 21 +- tracking/test_val.py | 223 ------ .../__pycache__/bot_sort.cpython-39.pyc | Bin 7148 -> 7158 bytes .../__pycache__/byte_tracker.cpython-39.pyc | Bin 14082 -> 14648 bytes tracking/trackers/bot_sort.py | 6 +- tracking/trackers/byte_tracker.py | 38 +- .../reid/__pycache__/config.cpython-39.pyc | Bin 769 -> 769 bytes .../__pycache__/reid_interface.cpython-39.pyc | Bin 3141 -> 3100 bytes tracking/trackers/reid/reid_interface.py | 3 +- .../__pycache__/drawtracks.cpython-39.pyc | Bin 9017 -> 9023 bytes .../__pycache__/mergetrack.cpython-39.pyc | Bin 3737 -> 3855 bytes .../utils/__pycache__/plotting.cpython-39.pyc | Bin 11425 -> 12836 bytes .../utils/__pycache__/proBoxes.cpython-39.pyc | Bin 2347 -> 2447 bytes .../utils/__pycache__/readData.cpython-39.pyc | Bin 0 -> 2167 bytes .../__pycache__/read_data.cpython-39.pyc | Bin 0 -> 4734 bytes .../read_pipeline_data.cpython-39.pyc | Bin 0 -> 3907 bytes tracking/utils/drawtracks.py | 53 +- tracking/utils/mergetrack.py | 43 +- tracking/utils/plotting.py | 56 +- tracking/utils/proBoxes.py | 11 +- tracking/utils/read_data.py | 236 +++++++ tracking/utils/read_pipeline_data.py | 250 +++++++ videos_select.py | 641 ++++++++++++++++++ 39 files changed, 2279 insertions(+), 375 deletions(-) create mode 100644 data.txt rename runs/detect/{加购_88 => 加购_88_}/detect - 快捷方式.lnk (100%) create mode 100644 tracking/__pycache__/contrast_analysis.cpython-39.pyc create mode 100644 tracking/contrast_analysis.py create mode 100644 tracking/data/说明文档.txt create mode 100644 tracking/module_analysis.py create mode 100644 tracking/rename.py delete mode 100644 tracking/test_val.py create mode 100644 tracking/utils/__pycache__/readData.cpython-39.pyc create mode 100644 tracking/utils/__pycache__/read_data.cpython-39.pyc create mode 100644 tracking/utils/__pycache__/read_pipeline_data.cpython-39.pyc create mode 100644 tracking/utils/read_data.py create mode 100644 tracking/utils/read_pipeline_data.py create mode 100644 videos_select.py diff --git a/data.txt b/data.txt new file mode 100644 index 0000000..2dba6cb --- /dev/null +++ b/data.txt @@ -0,0 +1 @@ +-0.011240 0.008260 -0.006586 -0.001030 -0.013720 -0.002563 -0.046692 0.001246 -0.004448 -0.000575 0.011596 -0.008257 0.013804 0.013636 -0.015231 0.009997 0.002080 -0.004984 -0.008263 -0.017190 0.015428 -0.023105 -0.032034 0.014638 0.017993 -0.020305 -0.026571 -0.013695 0.019300 -0.008173 -0.014615 -0.029739 -0.005307 0.004259 0.027220 -0.003702 0.014700 -0.007800 -0.003245 -0.033670 -0.046784 -0.014097 0.022587 -0.007240 0.020345 -0.007308 -0.032579 -0.026351 0.007981 -0.004793 -0.003228 -0.001727 -0.019257 -0.004215 -0.018438 0.022703 -0.007637 -0.009849 
-0.039515 -0.063543 0.038563 0.024434 -0.020794 -0.013288 0.024293 0.005290 -0.023577 0.043458 -0.008223 0.009316 0.015879 -0.016333 0.008900 0.014967 0.023980 -0.007785 0.011072 0.014957 -0.014673 -0.015173 0.001238 -0.002713 -0.010376 -0.009099 -0.019973 0.036330 -0.025961 -0.004987 -0.003738 -0.029137 0.027006 0.013193 -0.013956 -0.010477 0.006946 0.003161 0.006327 -0.010057 -0.000473 0.039186 -0.017787 0.030310 0.001994 0.021185 -0.010288 -0.026407 -0.007325 -0.035419 0.013209 0.040455 0.022204 0.011113 0.013391 -0.025687 -0.008719 0.018367 -0.044993 0.000359 -0.027293 0.017411 0.016963 -0.012727 0.004216 -0.007090 0.020172 -0.007653 -0.003869 0.000472 -0.006563 -0.010175 -0.012288 0.030884 0.021227 -0.008667 0.001995 0.002351 0.001223 0.024315 0.048389 -0.016056 0.015207 0.035997 -0.017303 0.029428 -0.018798 0.009189 -0.008502 0.036859 -0.003675 -0.003153 -0.017599 -0.020731 0.023639 0.019200 0.017236 0.015245 0.000899 0.013015 -0.026410 0.003367 0.007493 0.006190 -0.008258 0.017456 0.007086 -0.015679 -0.035943 -0.028529 -0.029751 0.000321 0.027217 0.002749 -0.016362 0.003308 0.011506 0.005780 0.008492 -0.002685 -0.006707 -0.001248 -0.005391 -0.010571 0.000716 -0.015180 -0.008275 -0.002362 -0.002915 -0.011054 -0.007975 0.016847 -0.003256 0.004353 -0.015026 -0.007171 -0.019375 -0.002358 -0.029985 -0.004786 0.008605 0.000120 0.016673 -0.029609 -0.030924 0.004636 -0.022859 -0.002508 0.028345 -0.007889 0.017705 0.012368 -0.020287 -0.001889 0.008966 0.017198 0.031740 -0.016312 -0.029071 0.014328 -0.029138 -0.006111 -0.038278 -0.006854 -0.006448 -0.016257 -0.003441 -0.003229 -0.012162 -0.014835 0.011474 -0.010222 -0.017947 0.021293 -0.006472 0.003448 0.005727 -0.033055 0.005207 -0.008356 -0.015410 0.029921 -0.030446 0.000284 -0.029262 0.034003 0.017720 -0.029013 0.010341 -0.037284 0.006937 0.060924 0.015401 0.023268 0.009021 -0.028368 -0.019817 0.034878 diff --git a/featureVal.py b/featureVal.py index fa272a6..dd9fe3a 100644 --- a/featureVal.py +++ b/featureVal.py @@ -104,25 +104,76 @@ def inference_image(image, detections): return imgs, features -def test_dog(): +def readimg(): + imgpath = r"D:\datasets\ym\Img_ResnetData\result\0.png" + image = cv2.imread(imgpath) + img = cv2.resize(image, (224, 224)) + + cv2.imwrite('0_224x224.jpg', img) + + +def readdata(datapath): - datapath = r"D:\datasets\ym\Img_ResnetData\dog_224x224\dog_224x224.txt" with open(datapath, 'r') as file: lines = file.readlines() dlist = lines[0].split(',') dfloat = [float(d) for d in dlist] - afeat = np.array(dfloat).reshape(1, -1) + return afeat +def readrawimg(datapath): + with open(datapath, 'r') as file: + llines = file.readlines() + imgs = [] + + row = 224 + + for i in range(8): + lines = llines[i*224 : (i+1)*224] + - imgpath = r"D:\datasets\ym\Img_ResnetData\dog_224x224\dog_224x224.jpg" - image = cv2.imread(imgpath) + img = np.empty((224, 224, 0), dtype=np.float32) + imgr = np.empty((0, 224), dtype=np.float32) + imgg = np.empty((0, 224), dtype=np.float32) + imgb = np.empty((0, 224), dtype=np.float32) + + for line in lines: + dlist = line.split(' ')[0:224] + + img_r = np.array([float(s.split(',')[0]) for s in dlist], dtype=np.float32).reshape(1, -1) + img_g = np.array([float(s.split(',')[1]) for s in dlist], dtype=np.float32).reshape(1, -1) + img_b = np.array([float(s.split(',')[2]) for s in dlist], dtype=np.float32).reshape(1, -1) + + # img_r = [float(s.split(',')[0]) for s in dlist if len(s.split(',')[0].encode('utf-8')) == 4] + # img_g = [float(s.split(',')[1]) for s in dlist if 
len(s.split(',')[1].encode('utf-8')) == 4] + # img_b = [float(s.split(',')[2]) for s in dlist if len(s.split(',')[2].encode('utf-8')) == 4] + + imgr = np.concatenate((imgr, img_r), axis=0) + imgg = np.concatenate((imgg, img_g), axis=0) + imgb = np.concatenate((imgb, img_b), axis=0) + + imgr = imgr[:, :, None] + imgg = imgg[:, :, None] + imgb = imgb[:, :, None] + + img = np.concatenate((imgb, imgg, imgr), axis=2).astype(np.uint8) + + imgs.append(img) + + return imgs + + + + + +def inference(image): + patches = [] - img = image[:, :, ::-1].copy() # the model expects RGB inputs - patch = ReIDEncoder.transform(img) + image = image[:, :, ::-1].copy() # the model expects RGB inputs + patch = ReIDEncoder.transform(image) patch = patch.to(device=ReIDEncoder.device) @@ -132,29 +183,43 @@ def test_dog(): pred[torch.isinf(pred)] = 1.0 bfeat = pred.cpu().data.numpy() + return bfeat + + + +def test_img_feat(): + # datapath = r"D:\datasets\ym\Img_ResnetData\aa\aa.txt" + # afeat = readdata(datapath) + + imgpath = r"D:\datasets\ym\Img_ResnetData\aa\aa.jpg" + img = cv2.imread(imgpath) + bfeat = inference(img) + datapath = r"D:\datasets\ym\Img_ResnetData\rawimg\7.txt" + afeat = readdata(datapath) + + rawpath = r"D:\datasets\ym\Img_ResnetData\rawimg\28950640607_mat_rgb" + imgx = readrawimg(rawpath) + cv2.imwrite("rawimg.png", imgx[7]) + bfeatx = inference(imgx[7]) + + cost_matrix = 1 - np.maximum(0.0, cdist(afeat, bfeatx, 'cosine')) + + imgpath1 = r"D:\datasets\ym\Img_ResnetData\result\0_224x224.png" + img1 = cv2.imread(imgpath1) + bfeat1 = inference(img1) aafeat = afeat / np.linalg.norm(afeat, ord=2, axis=1, keepdims=True) bbfeat = bfeat / np.linalg.norm(bfeat, ord=2, axis=1, keepdims=True) - cost_matrix = 1 - np.maximum(0.0, cdist(aafeat, bbfeat, 'cosine')) - - - - - print("Done!!!") - - - - - + print("Done!!!") def main(): imgpath = r"D:\datasets\ym\Img_ResnetData\20240531-103547_0354b1cb-53fa-48de-86cd-ac3c5b127ada_6921168593576\3568800050000_0.jpeg" - datapath = r"D:\datasets\ym\Img_ResnetData\0_tracker_inout.data" + datapath = r"D:\datasets\ym\Img_ResnetData\20240531-103547_0354b1cb-53fa-48de-86cd-ac3c5b127ada_6921168593576\0_tracker_inout.data" savepath = r"D:\datasets\ym\Img_ResnetData\result" image = cv2.imread(imgpath) @@ -184,9 +249,11 @@ def main(): if __name__ == '__main__': - main() + # main() - # test_dog() + # readimg() + + test_img_feat() diff --git a/runs/detect/加购_88/detect - 快捷方式.lnk b/runs/detect/加购_88_/detect - 快捷方式.lnk similarity index 100% rename from runs/detect/加购_88/detect - 快捷方式.lnk rename to runs/detect/加购_88_/detect - 快捷方式.lnk diff --git a/track_reid.py b/track_reid.py index 6f107a9..adcc4ee 100644 --- a/track_reid.py +++ b/track_reid.py @@ -266,11 +266,7 @@ def run( # Rescale boxes from img_size to im0 size det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() - det = det.cpu().numpy() - det = np.concatenate([det[:, :4], np.arange(nd).reshape(-1, 1), det[:, 4:]], axis=-1) - - DetBoxes = np.concatenate([DetBoxes, det[:, :6]], axis=0) - + # det = det.cpu().numpy() ## ============================================================ 前后帧相同 boxes 的特征赋值 # def static_estimate(box1, box2, TH1=8, TH2=12): # dij_abs = max(np.abs(box1 - box2)) @@ -321,14 +317,9 @@ def run( '''================== 1. 
Save dets/subimgs/features Dict ============='''
                    imgs, features = inference_image(im0, tracks)
-                    TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
-
-
-
-
-
+
                     imgdict = {}
                     boxdict = {}
                     featdict = {}
@@ -339,8 +330,7 @@ def run(
                     TracksDict[f"frame_{int(dataset.frame)}"] = {"imgs":imgdict, "boxes":boxdict, "feats":featdict}
 
                     track_boxes = np.concatenate([track_boxes, tracks], axis=0)
-
-
+
                     '''================== 2. Extract hand positions ==================='''
                     # idx_0 = tracks[:, 6].astype(np.int_) == 0
                     # hn = 0
@@ -422,8 +412,10 @@ def run(
     trackdicts_dir = trackdicts_dir.joinpath(f'{filename}.pkl')
     with open(trackdicts_dir, 'wb') as file:
         pickle.dump(TracksDict, file)
+
+    # np.save(f'{filename}.npy', DetBoxes)
-
+
     '''======================== 3. save hand_local data =================='''
     # handlocal_dir = Path('./tracking/data/handlocal/')
     # if not handlocal_dir.exists():
@@ -539,17 +531,16 @@ def main_loop(opt):
         #          r"D:\datasets\ym\广告板遮挡测试\8\2500441577966_20240508-175946_front_addGood_70f75407b7ae_155_17788571404.mp4"
         #          ]
 
-        # files = [r"D:\datasets\ym\视频\20240529\110518062-090ac04c-0a8c-479f-bc18-cb3553c90683-0_seek0.017962635633665514.mp4"]
-        files = [r"D:\datasets\ym\视频\20240529\110518060-550b7c4d-9946-4aa4-9131-81008692cd65-1_seek0.7670042724609232.mp4"]
+        files = [r"D:\datasets\ym\广告板遮挡测试\8\6907149227609_20240508-174733_back_returnGood_70f754088050_425_17327712807.mp4"]
 
         for file in files:
             optdict["source"] = file
             run(**optdict)
 
-            # k += 1
-            # if k == 3:
-            #     break
+            k += 1
+            if k == 1:
+                break
 
     elif os.path.isfile(p):
         optdict["source"] = p
         run(**vars(opt))
diff --git a/tracking/__pycache__/contrast_analysis.cpython-39.pyc b/tracking/__pycache__/contrast_analysis.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..092609ed936ef7306fa0b0106fb7ae3ccebf4ad4
GIT binary patch
literal 7703
[base85 payload of the compiled .pyc (7703 bytes) omitted]
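For reference, the per-video TracksDict pickle written by run() above can be reloaded for offline inspection. A minimal sketch, assuming a .pkl produced by this patch (the path below is a placeholder, not a file shipped with the repository):

import pickle

pkl_path = r"./tracking/data/trackdicts/example_video.pkl"   # hypothetical path
with open(pkl_path, 'rb') as f:
    TracksDict = pickle.load(f)

# run() stores one entry per frame: {"imgs": ..., "boxes": ..., "feats": ...}
for key, frame_data in TracksDict.items():
    print(key, len(frame_data["boxes"]), "boxes,", len(frame_data["feats"]), "features")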
diff --git a/tracking/contrast_analysis.py b/tracking/contrast_analysis.py
new file mode 100644
index 0000000..c7d035a
--- /dev/null
+++ b/tracking/contrast_analysis.py
@@ -0,0 +1,365 @@
+import os.path
+import shutil
+
+import numpy as np
+import matplotlib.pyplot as plt
+import cv2
+from utils.plotting import Annotator, colors
+import sys
+sys.path.append(r"D:\DetectTracking")
+from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output
+from tracking.utils.plotting import draw_tracking_boxes
+
+def showHist(err, correct):
+    err = np.array(err)
+    correct = np.array(correct)
+
+    fig, axs = plt.subplots(2, 1)
+    axs[0].hist(err, bins=50, edgecolor='black')
+    axs[0].set_xlim([0, 1])
+    axs[0].set_title('err')
+
+    axs[1].hist(correct, bins=50, edgecolor='black')
+    axs[1].set_xlim([0, 1])
+    axs[1].set_title('correct')
+    plt.show()
+
+def showgrid(recall, prec, ths):
+    # x = np.linspace(start=-0, stop=1, num=11, endpoint=True).tolist()
+    fig = plt.figure(figsize=(10, 6))
+    plt.plot(ths, recall, color='red', label='recall')
+    plt.plot(ths, prec, color='blue', label='PrecisePos')
+    plt.legend()
+    plt.xlabel('threshold')
+    # plt.ylabel('Similarity')
+    plt.grid(True, linestyle='--', alpha=0.5)
+    plt.savefig('accuracy_recall_grid.png')
+    plt.show()
+    # plt.close()
+
+
+def compute_recall_precision(err_similarity, correct_similarity):
+    ths = np.linspace(0, 1, 11)
+    recall, prec = [], []
+    for th in ths:
+        TP = len([num for num in correct_similarity if num >= th])
+        FP = len([num for num in err_similarity if num >= th])
+        if (TP+FP) == 0:
+            prec.append(1)
+            recall.append(0)
+        else:
+            prec.append(TP / (TP + FP))
+            recall.append(TP / (len(err_similarity) + len(correct_similarity)))
+
+    showgrid(recall, prec, ths)
+    return recall, prec
+
+
+# =============================================================================
+# def read_tracking_output(filepath):
+#     boxes = []
+#     feats = []
+#     with open(filepath, 'r', encoding='utf-8') as file:
+#         for line in file:
+#             line = line.strip()  # strip the trailing newline and any whitespace
+#
+#             if not line:
+#                 continue
+#
+#             if line.endswith(','):
+#                 line = line[:-1]
+#
+#             data = np.array([float(x) for x in line.split(",")])
+#             if data.size == 9:
+#                 boxes.append(data)
+#             if data.size == 256:
+#                 feats.append(data)
+#
+#     return np.array(boxes), np.array(feats)
+# =============================================================================
+
+def read_tracking_imgs(imgspath):
+    '''
+    input:
+        imgspath: images in this directory are the Yolo input images, 640x512
+    output:
+        imgs_0: rear-camera images, sorted by frameId
+        imgs_1: front-camera images, sorted by frameId
+    '''
+    imgs_0, frmIDs_0, imgs_1, frmIDs_1 = [], [], [], []
+
+    for filename in os.listdir(imgspath):
+        file, ext = os.path.splitext(filename)
+        flist = file.split('_')
+        if len(flist)==4 and ext==".jpg":
+            camID, frmID = flist[0], int(flist[-1])
+            imgpath = os.path.join(imgspath, filename)
+            img = cv2.imread(imgpath)
+
+            if camID=='0':
+                imgs_0.append(img)
+                frmIDs_0.append(frmID)
+            if camID=='1':
+                imgs_1.append(img)
+                frmIDs_1.append(frmID)
+
+    if len(frmIDs_0):
+        indice = np.argsort(np.array(frmIDs_0))
+        imgs_0 = [imgs_0[i] for i in indice]
+    if len(frmIDs_1):
+        indice = np.argsort(np.array(frmIDs_1))
+        imgs_1 = [imgs_1[i] for i in indice]
+
+    return imgs_0, imgs_1
+
+
+# =============================================================================
+# def draw_tracking_boxes(imgs, tracks):
+#     '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
+#                  0   1   2   3    4        5      6     7            8
+#        Key point: the order of imgs must correspond to the fid values in tracks
+#     '''
+#     subimgs = []
+#     for *xyxy, tid, conf, cls, fid, bid in tracks:
+#         label = f'id:{int(tid)}_{int(cls)}_{conf:.2f}'
+#
+#         annotator = Annotator(imgs[int(fid-1)].copy())
+#         if cls==0:
+#             color = colors(int(cls), True)
+#         elif tid>0 and cls!=0:
+#             color = colors(int(tid), True)
+#         else:
+#             color = colors(19, True)  # 19 is the last entry of the palette
+#
+#         pt2 = [p/2 for p in xyxy]
+#         annotator.box_label(pt2, label, color=color)
+#         img0 = annotator.result()
+#
+#         subimgs.append(img0)
+#
+#     return subimgs
+# =============================================================================
+
+def get_contrast_paths(pair, basepath):
+    assert(len(pair)==2 or len(pair)==3), "pair: seqdir, delete, barcodes"
+
+    getout_fold = pair[0]       # folder of the pick-up (take-out) event
+    relvt_barcode = pair[1]     # Barcode of the put-in event associated with the pick-up
+    if len(pair)==3:
+        error_match = pair[2]   # Barcode that the pick-up event was wrongly matched to
+    else:
+        error_match = ''
+
+
+    getoutpath, inputpath, errorpath = '', '', ''
+
+    day, hms = getout_fold.strip('_').split('-')
+
+    input_folds, times = [], []
+    errmatch_folds, errmatch_times = [], []
+    for pathname in os.listdir(basepath):
+        if pathname.endswith('_'): continue
+        if os.path.isfile(os.path.join(basepath, pathname)):continue
+        infold = pathname.split('_')
+        if len(infold)!=2: continue
+
+        day1, hms1 = infold[0].split('-')
+
+        if day1==day and infold[1]==relvt_barcode and int(hms1)
[the remainder of the tracking/contrast_analysis.py hunk is garbled in the source; the binary patches for tracking/dotrack/__pycache__/dotracks.cpython-39.pyc, dotracks_back.cpython-39.pyc and dotracks_front.cpython-39.pyc that follow are likewise omitted]
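As a quick sanity check, the compute_recall_precision() helper defined earlier in this file can be exercised with toy similarity lists; note that its recall denominator is len(err) + len(correct), i.e. it reports the accepted fraction of all samples rather than TP/len(correct). A sketch with fabricated numbers:

import numpy as np

correct_similarity = [0.92, 0.85, 0.78, 0.66, 0.95]   # similarities of correct matches (made up)
err_similarity = [0.40, 0.55, 0.61, 0.30]             # similarities of erroneous matches (made up)

# For each threshold th in np.linspace(0, 1, 11), TP counts correct >= th and
# FP counts err >= th; the function plots and returns the two curves.
recall, prec = compute_recall_precision(err_similarity, correct_similarity)
print(list(zip(np.linspace(0, 1, 11).round(1), recall, prec)))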
z;!YaTBx(~+{|~J{Y5xEK delta 1381 zcmZ8hO>7%g5Pt9N-}8F?lh{f8*96K>*>WLG_$#GqN-F_ERYh*lXtnaL6FZ52-mXGduJA-Sdy0 zEBb|kP4M^hqn*xIOWzcKXGeKdO{8~_qQ8&<=@Cf-8n6ch=q++gB|{sXGGM2aB0klC zqRh}soAc6Uh?XJx9HP$0fZ_RypiT+ITy;;$p#V?ZlYSigQa$;Ug9$4=?*U==3$nLmyl3JVBD}EFW+Fd^mz5CsG z&)f6+L8={u{nYe_!w?nzE}EkE2IG`}7)IS->>s#!v7|1lCQW|3k(#@)@Z0;Txp#i% z)FobbFit#PuXE+;AWM1s0JC*FlPYS_<>V2sEoe3?GR`=4sG_o#e9PV98ah}=*|=!187fIU-=%BrnC?>0B6!O!2~P0o2#$sqr1R**8hAA zwEgRIWQ;xq9lyHukOjF8s}nHhHAW=I$#LMvbgE9AAWq$mZLp7V_4e5b6$G?xf)(e% zzC}*R&fn6B=`@dXclL1+16n7FWFfQW=B-C)E@IgQ++zD7Qw7}9Wl5Ihf?NaxivO`! zWK}N7<=BA|R66XKqEv>elCaPfS&Iv>081HCfJIoIA$8e+I;_;l6z@Bw(16-Z{+L#Y zY+_KUMlhgUBdE7v2`Z>BLp9ScKoeSUVTC}mMuPkbX%e{#tJuAc-E+{UC2(N1lRdf< zIzkIBsdL8imuSN+i>6u!)*q%ucsPtFsnIzY_;H$i;H+(@b};GthuyX}Sx>bnj8muE zk3$iK?YKMWiM6B+u^SCUEV8>_P^Nru(CvHKi;LXKdCxB=TjdS9 sm5j?z^P4JI^(r8d3vag8`KC~B%C%F=^MXO!^HKxz4=3VU^3%fTA0VGp&Hw-a diff --git a/tracking/dotrack/dotracks.py b/tracking/dotrack/dotracks.py index 3fdad4b..72305b8 100644 --- a/tracking/dotrack/dotracks.py +++ b/tracking/dotrack/dotracks.py @@ -94,6 +94,7 @@ class Track: self.cls = int(boxes[0, 6]) self.frnum = boxes.shape[0] self.imgBorder = False + self.isCornpoint = False self.imgshape = imgshape self.state = MoveState.Unknown @@ -101,9 +102,13 @@ class Track: self.start_fid = int(np.min(boxes[:, 7])) self.end_fid = int(np.max(boxes[:, 7])) + '''''' self.Hands = [] + self.HandsIou = [] + self.Goods = [] + self.GoodsIou = [] '''5个关键点(中心点、左上点、右上点、左下点、右下点 )坐标''' @@ -113,7 +118,7 @@ class Track: (中心点、左上点、右上点、左下点、右下点 )轨迹特征''' self.compute_cornpts_feats() - + '''应计算各个角点面积、平均面积''' mw, mh = np.mean(boxes[:, 2]-boxes[:, 0]), np.mean((boxes[:, 3]-boxes[:, 1])) self.mwh = np.mean((mw, mh)) self.Area = mw * mh diff --git a/tracking/dotrack/dotracks_back.py b/tracking/dotrack/dotracks_back.py index 28fa8ca..459bdf9 100644 --- a/tracking/dotrack/dotracks_back.py +++ b/tracking/dotrack/dotracks_back.py @@ -55,6 +55,7 @@ class doBackTracks(doTracks): # tracks = self.sub_tracks(tracks, out_trcak) + [self.associate_with_hand(htrack, gtrack) for htrack in hand_tracks for gtrack in tracks] '''轨迹循环归并''' # merged_tracks = self.merge_tracks(tracks) merged_tracks = self.merge_tracks_loop(tracks) @@ -66,17 +67,28 @@ class doBackTracks(doTracks): self.Static.extend(static_tracks) tracks = self.sub_tracks(tracks, static_tracks) - - for gtrack in tracks: - # print(f"Goods ID:{gtrack.tid}") - for htrack in hand_tracks: - hand_ious = self.associate_with_hand(htrack, gtrack) - if len(hand_ious): - gtrack.Hands.append(htrack) - gtrack.HandsIou.append(hand_ious) - - self.Residual = tracks + # for gtrack in tracks: + # for htrack in hand_tracks: + # hand_ious = self.associate_with_hand(htrack, gtrack) + # if len(hand_ious): + # gtrack.Hands.append(htrack) + # gtrack.HandsIou.append(hand_ious) + # htrack.Goods.append((gtrack, hand_ious)) + + # for htrack in hand_tracks: + # self.merge_based_hands(htrack) + + self.Residual = tracks + + # def merge_based_hands(self, htrack): + # gtracks = htrack.Goods + + # if len(gtracks) >= 2: + # atrack, afious = gtracks[0] + # btrack, bfious = gtracks[1] + + def associate_with_hand(self, htrack, gtrack): ''' 迁移至基类: @@ -91,6 +103,7 @@ class doBackTracks(doTracks): hboxes = np.empty(shape=(0, 9), dtype = np.float) gboxes = np.empty(shape=(0, 9), dtype = np.float) + # start, end 为索引值,需要 start:(end+1) for start, end in htrack.moving_index: @@ -99,18 +112,17 @@ class doBackTracks(doTracks): gboxes = np.concatenate((gboxes, gtrack.boxes[start:end+1, :]), axis=0) hfids, gfids = hboxes[:, 7], 
-        fids = set(hfids).intersection(set(gfids))
+        fids = sorted(set(hfids).intersection(set(gfids)))
 
         if len(fids)==0:
-            return hand_ious
-
-
+            return None
+
+        # print(f"Goods ID: {gtrack.tid}, Hand ID: {htrack.tid}")
         for f in fids:
-            h = np.where(hfids==f)[0][0]
-            g = np.where(gfids==f)[0][0]
+            h = np.where(hboxes[:,7] == f)[0][0]
+            g = np.where(gboxes[:,7] == f)[0][0]
 
             x11, y11, x12, y12 = hboxes[h, 0:4]
             x21, y21, x22, y22 = gboxes[g, 0:4]
@@ -124,10 +136,11 @@ class doBackTracks(doTracks):
 
             iou = union / (area1 + area2 - union + 1e-6)
-            if iou>0:
-                hand_ious.append((f, iou))
+            if iou >= 0.01:
+                gtrack.Hands.append((htrack.tid, f, iou))
+
-        return hand_ious
+        return gtrack.Hands
 
     def merge_tracks(self, Residual):
         """
diff --git a/tracking/dotrack/dotracks_front.py b/tracking/dotrack/dotracks_front.py
index 6b50522..6eedf3c 100644
--- a/tracking/dotrack/dotracks_front.py
+++ b/tracking/dotrack/dotracks_front.py
@@ -44,21 +44,25 @@ class doFrontTracks(doTracks):
 
         '''tracks left after removing static targets'''
         tracks = self.sub_tracks(tracks, static_tracks)
-
+
+        [self.associate_with_hand(htrack, gtrack) for htrack in hand_tracks for gtrack in tracks]
 
         '''Iteratively merge tracks'''
         merged_tracks = self.merge_tracks_loop(tracks)
 
         tracks = [t for t in merged_tracks if t.frnum > 1]
-        for gtrack in tracks:
-            # print(f"Goods ID:{gtrack.tid}")
-            for htrack in hand_tracks:
-                hand_ious = self.associate_with_hand(htrack, gtrack)
-                if len(hand_ious):
-                    gtrack.Hands.append(htrack)
-                    gtrack.HandsIou.append(hand_ious)
-
+        # for gtrack in tracks:
+        #     # print(f"Goods ID:{gtrack.tid}")
+        #     for htrack in hand_tracks:
+        #         hand_ious = self.associate_with_hand(htrack, gtrack)
+        #         if len(hand_ious):
+        #             gtrack.Hands.append(htrack)
+        #             gtrack.HandsIou.append(hand_ious)
+
+        '''Detect static tracks and remove them'''
+        static_tracks = [t for t in tracks if t.frnum>1 and t.is_static()]
+        tracks = self.sub_tracks(tracks, static_tracks)
+
         freemoved_tracks = [t for t in tracks if t.is_free_move()]
         tracks = self.sub_tracks(tracks, freemoved_tracks)
@@ -73,10 +77,8 @@ class doFrontTracks(doTracks):
         a. the frame indices of their moving frames intersect
         b. the IoU is greater than 0 on every frame in the intersection
         '''
-        assert htrack.cls==0 and gtrack.cls!=0 and gtrack.cls!=9, 'Track cls is Error!'
- hand_ious = [] hboxes = np.empty(shape=(0, 9), dtype = np.float) gboxes = np.empty(shape=(0, 9), dtype = np.float) @@ -87,14 +89,12 @@ class doFrontTracks(doTracks): gboxes = np.concatenate((gboxes, gtrack.boxes[start:end+1, :]), axis=0) hfids, gfids = hboxes[:, 7], gboxes[:, 7] - fids = set(hfids).intersection(set(gfids)) + fids = sorted(set(hfids).intersection(set(gfids))) if len(fids)==0: - return hand_ious - - + return None + # print(f"Goods ID: {gtrack.tid}, Hand ID: {htrack.tid}") - ious = [] for f in fids: h = np.where(hfids==f)[0][0] g = np.where(gfids==f)[0][0] @@ -111,10 +111,10 @@ class doFrontTracks(doTracks): iou = union / (area1 + area2 - union + 1e-6) - if iou>0: - hand_ious.append((f, iou)) - - return hand_ious + if iou >= 0.01: + gtrack.Hands.append((htrack.tid, f, iou)) + + return gtrack.Hands diff --git a/tracking/goodmatch.py b/tracking/goodmatch.py index cd8ae3b..ee89559 100644 --- a/tracking/goodmatch.py +++ b/tracking/goodmatch.py @@ -30,6 +30,26 @@ from utils.drawtracks import plot_frameID_y2, draw_all_trajectories from utils.mergetrack import readDict +import csv + + +def read_csv_file(): + + file_path = r'D:\DeepLearning\yolov5_track\tracking\matching\featdata\Similarity.csv' + with open(file_path, mode='r', newline='') as file: + data = list(csv.reader(file)) + + matrix = [] + for i in range(1, len(data)): + matrix.append(data[i][1:]) + + matrix = np.array(matrix, dtype = np.float32) + + simil = 1 + (matrix-1)/2 + + print("done!!!") + + def get_img_filename(imgpath = r'./matching/images/' ): @@ -747,7 +767,7 @@ def main(): # imgsample_cleaning() '''3.1 计算事件间相似度: 将 front、back 的所有 track 特征合并''' - # calculate_similarity() + calculate_similarity() '''3.2 计算事件间相似度: 考虑前后摄的不同组合,或 track 间的不同组合''' # calculate_similarity_track() @@ -766,8 +786,29 @@ def main(): if __name__ == "__main__": - save_dir = Path(f'./result/') + # save_dir = Path(f'./result/') + # read_csv_file() + main() + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tracking/module_analysis.py b/tracking/module_analysis.py new file mode 100644 index 0000000..26a4380 --- /dev/null +++ b/tracking/module_analysis.py @@ -0,0 +1,390 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu May 30 14:03:03 2024 + +现场测试性能分析 + +@author: ym +""" +import os +import cv2 +import numpy as np +from pathlib import Path +import sys +sys.path.append(r"D:\DetectTracking") + + +from tracking.utils.plotting import Annotator, colors, draw_tracking_boxes +from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load +from tracking.trackers import BOTSORT, BYTETracker +from tracking.dotrack.dotracks_back import doBackTracks +from tracking.dotrack.dotracks_front import doFrontTracks +from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories + +from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output + +from contrast_analysis import contrast_analysis + +from tracking.utils.annotator import TrackAnnotator + +W, H = 1024, 1280 +Mode = 'front' #'back' +ImgFormat = ['.jpg', '.jpeg', '.png', '.bmp'] + +def video2imgs(path): + vpath = os.path.join(path, "videos") + + k = 0 + have = False + for filename in os.listdir(vpath): + file, ext = os.path.splitext(filename) + imgdir = os.path.join(path, file) + if os.path.exists(imgdir): + continue + else: + os.mkdir(imgdir) + + vfile = os.path.join(vpath, filename) + cap = cv2.VideoCapture(vfile) + i = 0 + while True: + ret, frame = cap.read() + if not ret: + break + + i += 1 + imgp = os.path.join(imgdir, 
file+f"_{i}.png") + cv2.imwrite(imgp, frame) + + print(filename+f": {i}") + + + cap.release() + + k+=1 + if k==1000: + break + +def draw_boxes(): + datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data' + VideosData = read_tracker_input(datapath) + + bboxes = VideosData[0][0] + ffeats = VideosData[0][1] + + videopath = r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4" + + cap = cv2.VideoCapture(videopath) + i = 0 + while True: + ret, frame = cap.read() + if not ret: + break + + + annotator = Annotator(frame.copy(), line_width=3) + + + boxes = bboxes[i] + + for *xyxy, conf, cls in reversed(boxes): + label = f'{int(cls)}: {conf:.2f}' + + color = colors(int(cls), True) + annotator.box_label(xyxy, label, color=color) + + img = annotator.result() + + imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1) + cv2.imwrite(imgpath, img) + + print(f"Output: {i}") + i += 1 + cap.release() + +def read_imgs(imgspath, CamerType): + imgs, frmIDs = [], [] + for filename in os.listdir(imgspath): + file, ext = os.path.splitext(filename) + flist = file.split('_') + if len(flist)==4 and ext in ImgFormat: + camID, frmID = flist[0], int(flist[-1]) + imgpath = os.path.join(imgspath, filename) + img = cv2.imread(imgpath) + + if camID==CamerType: + imgs.append(img) + frmIDs.append(frmID) + + if len(frmIDs): + indice = np.argsort(np.array(frmIDs)) + imgs = [imgs[i] for i in indice] + + return imgs + + + + pass + + + +def init_tracker(tracker_yaml = None, bs=1): + """ + Initialize tracker for object tracking during prediction. + """ + TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT} + cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml)) + + tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30) + + return tracker + +def tracking(bboxes, ffeats): + tracker_yaml = r"./trackers/cfg/botsort.yaml" + tracker = init_tracker(tracker_yaml) + + TrackBoxes = np.empty((0, 9), dtype = np.float32) + TracksDict = {} + + '''========================== 执行跟踪处理 =============================''' + # dets 与 feats 应保持严格对应 + for dets, feats in zip(bboxes, ffeats): + det_tracking = Boxes(dets).cpu().numpy() + tracks = tracker.update(det_tracking, features=feats) + + + '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index] + 0 1 2 3 4 5 6 7 8 + 这里,frame_index 也可以用视频的 帧ID 代替, box_index 保持不变 + ''' + + if len(tracks): + TrackBoxes = np.concatenate([TrackBoxes, tracks], axis=0) + + FeatDict = {} + for track in tracks: + tid = int(track[8]) + FeatDict.update({tid: feats[tid, :]}) + + frameID = tracks[0, 7] + + # print(f"frameID: {int(frameID)}") + assert len(tracks) == len(FeatDict), f"Please check the func: tracker.update() at frameID({int(frameID)})" + + TracksDict[f"frame_{int(frameID)}"] = {"feats":FeatDict} + + + return TrackBoxes, TracksDict + + + +def do_tracker_tracking(fpath, save_dir): + bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath) + tboxes, feats_dict = tracking(bboxes, ffeats) + + CamerType = os.path.basename(fpath).split('_')[0] + dirname = os.path.split(os.path.split(fpath)[0])[1] + if CamerType == '1': + vts = doFrontTracks(tboxes, feats_dict) + vts.classify() + + plt = plot_frameID_y2(vts) + plt.savefig('front_y2.png') + # plt.close() + elif CamerType == '0': + vts = doBackTracks(tboxes, feats_dict) + vts.classify() + + filename = dirname+'_' + CamerType + edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png") 
+ draw_all_trajectories(vts, edgeline, save_dir, filename) + else: + print("Please check data file!") + + + +def do_tracking(fpath, savedir): + ''' + fpath: 算法各模块输出的data文件地址,匹配 + savedir: 对 fpath 各模块输出的复现 + 分析具体视频时,需指定 fpath 和 savedir + ''' + # fpath = r'D:\contrast\dataset\1_to_n\709\20240709-102758_6971558612189\1_track.data' + # savedir = r'D:\contrast\dataset\result\20240709-102843_6958770005357_6971558612189\error_6971558612189' + + imgpath, dfname = os.path.split(fpath) + CamerType = dfname.split('_')[0] + + bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(fpath) + + tracking_output_path = os.path.join(imgpath, CamerType + '_tracking_output.data') + tracking_output_boxes, _ = read_tracking_output(tracking_output_path) + + + + + save_dir, basename = os.path.split(savedir) + if not os.path.exists(savedir): + os.makedirs(savedir) + + ''' 读取 fpath 中 track.data 文件对应的图像 ''' + + imgs = read_imgs(imgpath, CamerType) + + ''' 在 imgs 上画框并保存,如果 trackerboxes 的帧数和 imgs 数不匹配,返回原图''' + imgs_dw = draw_tracking_boxes(imgs, trackerboxes) + + if len(imgs_dw)==0: + imgs_dw = [img for img in imgs] + print(f"fpath: {imgpath}, savedir: {savedir}。Tracker输出的图像数和 imgs 中图像数不相等,无法一一匹配并画框") + + for i in range(len(imgs_dw)): + img_savepath = os.path.join(savedir, CamerType + "_" + f"{i}.png") + # img = imgs_dw[i] + cv2.imwrite(img_savepath, imgs_dw[i]) + + if not isinstance(savedir, Path): + savedir = Path(savedir) + save_dir = savedir.parent + + + traj_graphic = basename + '_' + CamerType + if CamerType == '1': + vts = doFrontTracks(trackerboxes, tracker_feat_dict) + vts.classify() + + plt = plot_frameID_y2(vts) + ftpath = save_dir.joinpath(f"{traj_graphic}_front_y2.png") + plt.savefig(str(ftpath)) + plt.close() + elif CamerType == '0': + vts = doBackTracks(trackerboxes, tracker_feat_dict) + vts.classify() + + edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png") + draw_all_trajectories(vts, edgeline, save_dir, traj_graphic) + else: + print("Please check data file!") + + + '''================== 现场测试的 tracking() 算法输出 ==================''' + if CamerType == '1': + aline = cv2.imread("./shopcart/cart_tempt/board_ftmp_line.png") + elif CamerType == '0': + aline = cv2.imread("./shopcart/cart_tempt/edgeline.png") + else: + print("Please check data file!") + + bline = aline.copy() + + annotator = TrackAnnotator(aline, line_width=2) + for track in trackingboxes: + annotator.plotting_track(track) + aline = annotator.result() + + annotator = TrackAnnotator(bline, line_width=2) + if not isinstance(tracking_output_boxes, list): + tracking_output_boxes = [tracking_output_boxes] + + for track in tracking_output_boxes: + annotator.plotting_track(track) + bline = annotator.result() + + abimg = np.concatenate((aline, bline), axis = 1) + abH, abW = abimg.shape[:2] + cv2.line(abimg, (int(abW/2), 0), (int(abW/2), abH), (128, 255, 128), 2) + + algpath = save_dir.joinpath(f"{traj_graphic}_Alg.png") + cv2.imwrite(algpath, abimg) + + return + + +def main_loop(): + del_barcode_file = 'D:/contrast/dataset/compairsonResult/deletedBarcode_20240709_pm.txt' + basepath = r'D:\contrast\dataset\1_to_n\709' # 测试数据文件夹地址 + SavePath = r'D:\contrast\dataset\result' # 结果保存地址 + prefix = ["getout_", "input_", "error_"] + + + '''获取性能测试数据相关路径''' + relative_paths = contrast_analysis(del_barcode_file, basepath, SavePath) + + '''开始循环执行每次测试过任务''' + k = 0 + for tuple_paths in relative_paths: + + '''生成文件夹存储结果图像的文件夹''' + namedirs = [] + for data_path in tuple_paths: + base_name = 
os.path.basename(data_path).strip().split('_') + if len(base_name[-1]): + name = base_name[-1] + else: + name = base_name[0] + namedirs.append(name) + + sdir = "_".join(namedirs) + savepath = os.path.join(SavePath, sdir) + if not os.path.exists(savepath): + os.makedirs(savepath) + + for path in tuple_paths: + for filename in os.listdir(path): + fpath = os.path.join(path, filename) + + if os.path.isfile(fpath) and filename.find("track.data")>0: + enent_name = '' + + '''构建结果保存文件名前缀''' + for i, name in enumerate(namedirs): + if fpath.find(name)>0: + enent_name = prefix[i] + name + break + + spath = os.path.join(savepath, enent_name) + + do_tracking(fpath, spath) + + k +=1 + if k==1: + break + + + + +def main_fold(): + save_dir = Path('./result') + if not save_dir.exists(): + save_dir.mkdir(parents=True, exist_ok=True) + + files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/' + for filename in os.listdir(files_path): + filename = '1_track.data' + + fpath = os.path.join(files_path, filename) + if os.path.isfile(fpath) and filename.find("track.data")>0: + # do_tracker_tracking(fpath, save_dir) + do_tracking(fpath, save_dir) + + +if __name__ == "__main__": + try: + main_loop() + # main_fold() + except Exception as e: + print(f'Error: {e}') + + + + + + + + + + + + + + diff --git a/tracking/rename.py b/tracking/rename.py new file mode 100644 index 0000000..500a128 --- /dev/null +++ b/tracking/rename.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat Jun 8 09:51:59 2024 + +@author: ym +""" +import os + +def main(): + directory = r'D:\DetectTracking\runs\detect' + directory = r'D:\DetectTracking\tracking\result\tracks' + + suffix = '_' + + for root, dirs, files in os.walk(directory): + for name in dirs: + old_name = os.path.join(root, name) + new_name = os.path.join(root, f"{name}{suffix}") + try: + os.rename(old_name, new_name) + except Exception as e: + print(f"Failed to rename directory '{old_name}': {e}") + + for name in files: + old_name = os.path.join(root, name) + file, ext = os.path.splitext(name) + new_name = os.path.join(root, f"{file}{suffix}{ext}") + try: + os.rename(old_name, new_name) + except Exception as e: + print(f"Failed to rename file '{old_name}': {e}") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/tracking/test_tracking.py b/tracking/test_tracking.py index 21a4ba1..5a51a2f 100644 --- a/tracking/test_tracking.py +++ b/tracking/test_tracking.py @@ -12,7 +12,6 @@ import time import pickle import matplotlib.pyplot as plt import pandas as pd - from scipy.spatial.distance import cdist from pathlib import Path @@ -39,8 +38,7 @@ def detect_start_end(bboxes, features_dict, filename): boxes = np.empty(shape=(0, 9), dtype = np.float) if filename.find("back") >= 0: vts = doBackTracks(bboxes, features_dict) - vtx = [t for t in vts if t.cls != 0] - + vtx = [t for t in vts if t.cls != 0] for track in vtx: if track.moving_index.size: boxes = np.concatenate((boxes, track.moving_index), axis=0) @@ -64,7 +62,7 @@ def detect_start_end(bboxes, features_dict, filename): start = 0 return start, end - + def save_subimgs(vts, file, TracksDict): imgdir = Path(f'./result/imgs/{file}') if not imgdir.exists(): @@ -82,13 +80,14 @@ def save_subimgs(vts, file, TracksDict): cv2.imwrite(str(imgdir) + f"/{tid}_{fid}_{bid}.png", img) def have_tracked(): - trackdict = r'./data/trackdicts' + trackdict = r'./data/trackdicts_20240608' alltracks = [] k = 0 gt = Profile() for filename in os.listdir(trackdict): # filename = 
'test_20240402-173935_6920152400975_back_174037372.pkl' - # filename = '加购_91.pkl' + filename = '6907149227609_20240508-174733_back_returnGood_70f754088050_425_17327712807.pkl' + filename = '6907149227609_20240508-174733_front_returnGood_70f754088050_425_17327712807.pkl' file, ext = os.path.splitext(filename) filepath = os.path.join(trackdict, filename) @@ -117,12 +116,10 @@ def have_tracked(): edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png") draw_all_trajectories(vts, edgeline, save_dir, filename) print(file+f" need time: {gt.dt:.2f}s") - - - - # k += 1 - # if k==1: - # break + + k += 1 + if k==1: + break if len(alltracks): drawFeatures(alltracks, save_dir) diff --git a/tracking/test_val.py b/tracking/test_val.py deleted file mode 100644 index e5ff032..0000000 --- a/tracking/test_val.py +++ /dev/null @@ -1,223 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu May 30 14:03:03 2024 - -@author: ym -""" -import os -import cv2 -import numpy as np -from pathlib import Path -import sys -sys.path.append(r"D:\DetectTracking") - - -from tracking.utils.plotting import Annotator, colors -from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load -from tracking.trackers import BOTSORT, BYTETracker -from tracking.dotrack.dotracks_back import doBackTracks -from tracking.dotrack.dotracks_front import doFrontTracks -from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories - -W, H = 1024, 1280 - -Mode = 'front' #'back' - -def read_data_file(datapath): - - with open(datapath, 'r') as file: - lines = file.readlines() - Videos = [] - FrameBoxes, FrameFeats = [], [] - boxes, feats = [], [] - - bboxes, ffeats = [], [] - timestamp = [] - t1 = None - for line in lines: - if line.find('CameraId') >= 0: - t = int(line.split(',')[1].split(':')[1]) - timestamp.append(t) - - if len(boxes) and len(feats): - FrameBoxes.append(np.array(boxes, dtype = np.float32)) - FrameFeats.append(np.array(feats, dtype = np.float32)) - - boxes, feats = [], [] - - if t1 and t - t1 > 1e4: - Videos.append((FrameBoxes, FrameFeats)) - FrameBoxes, FrameFeats = [], [] - t1 = int(line.split(',')[1].split(':')[1]) - - if line.find('box') >= 0: - box = line.split(':', )[1].split(',')[:-1] - boxes.append(box) - bboxes.append(boxes) - - - if line.find('feat') >= 0: - feat = line.split(':', )[1].split(',')[:-1] - feats.append(feat) - ffeats.append(feat) - - - - - FrameBoxes.append(np.array(boxes, dtype = np.float32)) - FrameFeats.append(np.array(feats, dtype = np.float32)) - Videos.append((FrameBoxes, FrameFeats)) - - TimeStamp = np.array(timestamp, dtype = np.float32) - DimesDiff = np.diff((timestamp)) - - return Videos - -def video2imgs(path): - vpath = os.path.join(path, "videos") - - k = 0 - have = False - for filename in os.listdir(vpath): - file, ext = os.path.splitext(filename) - imgdir = os.path.join(path, file) - if os.path.exists(imgdir): - continue - else: - os.mkdir(imgdir) - - vfile = os.path.join(vpath, filename) - cap = cv2.VideoCapture(vfile) - i = 0 - while True: - ret, frame = cap.read() - if not ret: - break - - i += 1 - imgp = os.path.join(imgdir, file+f"_{i}.png") - cv2.imwrite(imgp, frame) - - print(filename+f": {i}") - - - cap.release() - - k+=1 - if k==1000: - break - -def draw_boxes(): - datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data' - VideosData = read_data_file(datapath) - - bboxes = VideosData[0][0] - ffeats = VideosData[0][1] - - videopath = 
r"D:\datasets\ym\videos_test\20240530\134458234-1cd970cf-f8b9-4e80-9c2e-7ca3eec83b81-1_seek0.10415589124891511.mp4" - - cap = cv2.VideoCapture(videopath) - i = 0 - while True: - ret, frame = cap.read() - if not ret: - break - - - annotator = Annotator(frame.copy(), line_width=3) - - - boxes = bboxes[i] - - for *xyxy, conf, cls in reversed(boxes): - label = f'{int(cls)}: {conf:.2f}' - - color = colors(int(cls), True) - annotator.box_label(xyxy, label, color=color) - - img = annotator.result() - - imgpath = r"D:\datasets\ym\videos_test\20240530\result\int8_front\{}.png".format(i+1) - cv2.imwrite(imgpath, img) - - print(f"Output: {i}") - i += 1 - cap.release() - -def init_tracker(tracker_yaml = None, bs=1): - """ - Initialize tracker for object tracking during prediction. - """ - TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT} - cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml)) - - tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30) - - return tracker - -def tracking(bboxes, ffeats): - tracker_yaml = r"./trackers/cfg/botsort.yaml" - tracker = init_tracker(tracker_yaml) - - track_boxes = np.empty((0, 9), dtype = np.float32) - features_dict = {} - - '''==================== 执行跟踪处理 =======================''' - for dets, feats in zip(bboxes, ffeats): - # 需要根据frame_id重排序 - det_tracking = Boxes(dets).cpu().numpy() - tracks = tracker.update(det_tracking, feats) - - if len(tracks): - track_boxes = np.concatenate([track_boxes, tracks], axis=0) - feat_dict = {int(x.idx): x.curr_feat for x in tracker.tracked_stracks if x.is_activated} - frame_id = tracks[0, 7] - features_dict.update({int(frame_id): feat_dict}) - - return det_tracking, features_dict - - - -def main(): - datapath = r'D:\datasets\ym\videos_test\20240530\1_tracker_inout(1).data' - VideosData = read_data_file(datapath) - - bboxes = VideosData[0][0] - ffeats = VideosData[0][1] - - bboxes, feats_dict = tracking(bboxes, ffeats) - - - - if Mode == "front": - vts = doFrontTracks(bboxes, feats_dict) - vts.classify() - - - plt = plot_frameID_y2(vts) - plt.savefig('front_y2.png') - # plt.close() - else: - vts = doBackTracks(bboxes, feats_dict) - vts.classify() - - edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png") - draw_all_trajectories(vts, edgeline, save_dir, filename) - - - - - - - - - -if __name__ == "__main__": - filename = 'traj.png' - save_dir = Path('./result') - if not save_dir.exists(): - save_dir.mkdir(parents=True, exist_ok=True) - - main() - - diff --git a/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc b/tracking/trackers/__pycache__/bot_sort.cpython-39.pyc index a6bfb5c6912728c17d8daa671a48a56d6ff4ee11..5d926ba77a4fc9e15aa7c0e92648d17919e45abe 100644 GIT binary patch delta 232 zcmaE3{>_{>k(ZZ?0SLZr>qxt@k+)S)iVeht0uUR7oq@PGYw{XFDdB(`hAie3rc%bD zfErdH%}``9`J&)A#-_<9gcdNiOwJTG6}iQdl3G%Hi>)|0zbLi1ND8PsYVsyw69GOT zi-C!Uk%du+k%N(sY4aE1-HeQFn>UCyGBVDXY$Se?ap⪚#`dUQ7k1nNkzBVO7e3u zi%X&=i%Oa?9+>PS*~XYPIZ#+!>?qKnA`2j)$zG%YVp{=;Ta%=+8TU=jkxFIEpL|Km LkdbFHi}VWsFit)} delta 229 zcmexn{>Gd)k(ZZ?0SMAOLen}n^0o>}v4Xfzzy_p2*cph6lP0ebl;ZZNVaQ@mVJc-T z@|b*6@Ec>p)|0zbLi1ND8R$*5qBnCVV_V76TIx zBMYMtBOl}Df5N*N8Cy2*5^ZE;oHp4;{3PS{&1@1}jQqD)N^+8lZn2f*=VTU_+?p&e zX~wv3a+qWrW6oqnKHGwXG{iQjfM4<|NpUOWPAl7?5HRM27)mbLHN@%owBYZ8~a zav&g_mb4B7l%f{ximGi&pag>i0;v&Fi-01PAE2^oRVZpz+TT_GAkpv4T|2>|NL^{p zoH_T*%z4e6GyA~QD?Fa9s)}g%{q>`Tk)PjuHvVoK4K1{8MYdI>c{kZb#K~{Z|PkrPJ)zUW7D%4cdX!Lr-pN7>-fs zKl+BvWqW8MJCvI!hq99uWT=AJK|km$&z8H5-c$k$YDSIOz#3SJrF4hYuoz$+!TMX}(eO6=TB7#Kr^88WAJCmkuZ3S@_BBLR`&qQ#4f3(5 
zYhSlQoG0RqG$bJoaY8>;oAdxn$5zW@j`QiMvXPsE2Sz5QvmXB%P2LYsHr#2i9QJs2 zsxUKM$Z@5+GE9UYAO?1je}lLWs=;l8=54!bPwXl)k6%w@#eV~gy^bA_zlr^kZIMq_ z^?h^m$~R@ZD`1%TWXh_cCgnHuwOskk9y_a_5$KnB6IN z#t*W4<;nP!*Sdz#GEt!e#)QFd6I!vVgb>Zd1kz6O0Q`dajL=<8e)VQk5N z$FG||a421`HYwY)GcGx8#RmLN+It6quhf7K63e#yUzjj*@-i_)#7vngSg)gwkGfXoa-^TF7Q` zXb@)(G$B2nt9`*%zT81)$RewrUAnI>!ZzIuRK+j92^d_iY_FncHwfo&I?ZQ{3%qpx z-5`d~G2AYYEaGG`JU7ELnK$I0k~fag1Mr)P59n+xoqBqnV*3Cu?cOvuHiq}2tqMLyTm++6D<9aCqKD4K&7 zMzg8$uBA_!E;2SKf7W~r&hRJA?SaQm$Y`p?B2nLxTT}h>6hX^By6i$|23lbTUxa3A zdCg@zHC@YSqndA`S!gy|2+ctRGd~Qd`w>9HkBYF+@@C#b3W`;58+g?UUGw8!sOX3) z5ijZUezk~->L$&vK_5e33r6be&=b90MAWzeJ&o7kQV5G$Q70O3Q5YrbGzpeKYsAKp zecF*$KPi&=XyI*fy`tHNJzs_I3z{EO;l3dWldh{*WZjG17H;(Izj1P%qF)3QG zf>zR*Cd}oHr&8c+3ck38Yo0Oi)hfp30~r_Dv38|fji?uE0^R)9huH{oE!Td~5zYIw zQ_OGk>WXVcn`kZR5Eg2di@S; z&_PzISfhE|z`A>|ZmOg~Q+wGCi~6xW4{K1%QEH{;Nw3ogwqlJ)EWl_Ly?|e)S9nFQ z&Wba7180Z?=~b*a;|pe2U&#O1^1SGTnEenlQ4tdkf~fGn=0?U%$odgvU9V&fT_)>C zB87h7tYFB#D}26YMWuD2&F{uM}#H!w6@?b9&8=BGekOi8$jt@%Nm+Yxq@#3NT-98 z9^$#e?1Wb+8)K8BC;@X>Zx$r^aaX6UR5~uB-M_3NMax!DFs0Q4piiB=#%ojO|N#S;EIgQ7IK-w@HAe0LGjCr#H{?6LRHidS&7%5$agm1 zko*NcA5t1D*Z#o-wfgt*@fo>)%b7YVXH>w$-~)sg6$2fCbMmt-$D@kjKKc^qO6YU) ziQW%vby81DzIDTTGF;j4#@vd@h+PHL_kdkkdSz>w1?=~esPcSYn7s{)B;@b=4%;sf z_x-ZBzm2^kZ|%Q&{j)?kPe5H~FA}C!`ysH4^4b3Wc^c9UpsJ;V@QtSx8J^V)2~0Mf0{;LwQ%*4Q9c&pA4mJ;u;z;S*Y3(%g91;Ej z9jaTAQzIziG!bdi@+jq0A4zH`=9q{oTF@UMVwH%ADx@36ods5mil{nKZ!BoQBLQB? z^!kI27bAP9bQ4!)4N|(I!a9mtE#jhDoxF%FUa=e3{&sm7ju z4qodzd8M8JNSgfcirdrI(}+SLjsG&-*_mwMKjk{Qw?AE;EYEJ=KelvK&CB!MOZ%>?LJ1y476HUx*BUgqf^xstgYg#{q z2NhZkpTv+)5tt^hlYn}3JVO}W8=Ou$KSzL0W$<88=WT|DbbvU;JwL0S9~|hL^5Bki mHDT#k*}~r=Gjlv4PwvUP*>~$Vw zE^h0TI;BaSj`$OS*rjUKRw<3JR9ZDi2v9*tMWh6Tu7VIkLZBj4kPv@R!#Q(p*G=0W ztUYJW%$%7y=gfEKoOzo*JrmA^LVgK;AHTV8?Bv6j!tXbGp`}5&4@&VxAeO|)0-+b7 zma)F{-{Wnj%5`-k4$AovQtMqnduc~#AW!iN^eaB=1EFKjgc5h!C#O>32j)u zt~^CZH9w$neFF&4Aa71o@mrdY)N`Ny9_ix0)*EX&VaV|mXx5wTl3B*y%yyf^TnwcpU z^2Mys#&wOK?m_|b5Z#T^_lm~;BkKOa(}NDNC_=&&--F%`_@?Kktcmpa^yqG075Y8t zJ&g5hgP!9bgK5gh{1fS>iV%sW2nCO6aaM6+ofNhbwl^4;fLh zmZQ%VOgtuW;zGRxxJ4h5FI4m(j_yOUABhV$ePwbG5Tm73>UE^fJh#lgu#?B z@q$TSULU=xi=*qquSY}gKDYc~)K7Lj2&{l9McBd#i!%^l*#aG}!FLIjqtL8%`<7_p zMLrPwV->=D2tHw9G)|*O5goc}O^wjY%e$(>q!nev99qW(Cu-Mdx@W~ zx#q4))6j;cpTOy^L>sVg@Iw6Mpx9SP(BrUS6VJz!{PD!Kgh0e0ki$#BbkX}7fh$H} zHl3!~0!^pi<&V^Ud142yycx+hoDc-STN9(xS?fgJ6raAU?!zdt7s+8HN05l4MQ1wR z0?ik))H>t(Kxm7WP*j#?kc{!nU_xsIHp%Ogb#)PM+^fpOAAsV8;)9}Nf$QnI5?dZm zULoWlkEQN`FZe*J#XUgE&!!qRRQD(RyQ%(p1-R889-R&a`-)S5F(#=}PBO`MNtV*m zxMV9(R45u09f}8VOl&Vu+4ccdY(MidDW~SNN!1Rl@H+6pwSLJCS)P)Y1zD&p&)Z?< zXW?4Ou7ElKbp!;lu7n!aC3$w+vl5u)6=v1#k>(f3d4O6e!m1a+Wh-_8zs@e-nq93cN73sZ z1*=B8N(uKUKIL>Vg69ugzRFrb&3;g`dPPk*9#FMDA(`|3NfmUx3A#22UG;Uk-ejQR zu?Orn*r092zD9wLrb%f@p<9=f*%-`hhQ5|hZ7-n5JZo#A9Doj1+W=l#+nz~x=UPDl zg^qlV0?O=&2^qpU@DHMUfk4pkgRdZ%rPN1JEV$KdnlS0M}gbMwGv z5!lzl4P5787!@glK`6$Ib$L!DG#mNDEt}Pu)g^dK*XFN51B6qb6&{4_QDi#fW8(Lh zog$RL=1H!%c7)Erl+`VVSMxSK2tAH=`poGw zBb4%w+6-`Cvb|o<<1*cRM|)kOfGnnhPGCH1rA^SbnBGhqc%i-f_@~|M1_lffPH-YW zCuB1-qgh9NBwvIqZ#qLK(^J4wQ^<%m%}(b}W=%JdIfrI<5AQBFTcwv#Se#E1)0Kd44Q=1zdpCZm zstn^p;;UWF$1n4Z8{;~C1_r*(f4T8O6q5`ARq*L1S>67iC{f}8-`bHKgB`?mDY`+>ucQY0)_fw80#+>new6p<~! 
z7n4mgDU}H(F5{5IoR`iM3($2Du{Z<~Lqpf%F zTLXvm`#^s>%#UnKlHc&5fnomKw!uShKqtPn&AGzJZ8z-=0G+rz4If2hQ3Qkw-CE=j zO74RK6ImD7cLQhmdC&GbKDK>R&-ZcQO(epx&ja@!LRmZklpv)4eiDHPdJNt)H@5db zHjUHC5dKH#IL_C>|z#UZl2D!2Xzj oebA^t&ImdhyuS-)Xf=OpaA%+a1qJS=KswIjccipPWM1n0AI%rYYybcN diff --git a/tracking/trackers/bot_sort.py b/tracking/trackers/bot_sort.py index 7a69947..01b86f4 100644 --- a/tracking/trackers/bot_sort.py +++ b/tracking/trackers/bot_sort.py @@ -119,12 +119,14 @@ class BOTSORT(BYTETracker): """Returns an instance of KalmanFilterXYWH for object tracking.""" return KalmanFilterXYWH() - def init_track(self, dets, scores, cls, imgs): + def init_track(self, dets, scores, cls, imgs, features_keep): """Initialize track with detections, scores, and classes.""" if len(dets) == 0: return [] if self.args.with_reid and self.encoder is not None: - features_keep = self.encoder.inference(imgs, dets) + if features_keep is None: + features_keep = self.encoder.inference(imgs, dets) + return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections else: return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections diff --git a/tracking/trackers/byte_tracker.py b/tracking/trackers/byte_tracker.py index dd2a9e0..a4f8541 100644 --- a/tracking/trackers/byte_tracker.py +++ b/tracking/trackers/byte_tracker.py @@ -18,7 +18,12 @@ def dists_update(dists, strack_pool, detections): blabel = np.array([int(stack.cls) for stack in detections]) amlabel = np.expand_dims(alabel, axis=1).repeat(len(detections),axis=1) bmlabel = np.expand_dims(blabel, axis=0).repeat(len(strack_pool),axis=0) - dist_label = 1 - (bmlabel == amlabel) + + mlabel = bmlabel == amlabel + iou_dist = matching.iou_distance(strack_pool, detections) > 0.1 #boxes iou>0.9时,可以不考虑类别 + dist_label = (1 - mlabel) & iou_dist # 不同类,且不是严格重叠,需考虑类别距离 + + dist_label = 1 - mlabel dists = np.where(dists > dist_label, dists, dist_label) return dists @@ -103,6 +108,7 @@ class STrack(BaseTrack): self.tracklet_len = 0 self.state = TrackState.Tracked self.is_activated = True + self.first_find = False self.frame_id = frame_id if new_id: self.track_id = self.next_id() @@ -127,6 +133,7 @@ class STrack(BaseTrack): self.convert_coords(new_tlwh)) self.state = TrackState.Tracked self.is_activated = True + self.first_find = False self.score = new_track.score self.cls = new_track.cls @@ -207,7 +214,7 @@ class BYTETracker: self.args.new_track_thresh = 0.5 - def update(self, results, img=None): + def update(self, results, img=None, features=None): """Updates object tracker with new detections and returns tracked object bounding boxes.""" self.frame_id += 1 activated_stracks = [] @@ -240,7 +247,7 @@ class BYTETracker: cls_keep = cls[remain_inds] cls_second = cls[inds_second] - detections = self.init_track(dets, scores_keep, cls_keep, img) + detections = self.init_track(dets, scores_keep, cls_keep, img, features) # Add newly detected tracklets to tracked_stracks unconfirmed = [] @@ -283,7 +290,7 @@ class BYTETracker: # Step 3: Second association, with low score detection boxes # association the untrack to the low score detections - detections_second = self.init_track(dets_second, scores_second, cls_second, img) + detections_second = self.init_track(dets_second, scores_second, cls_second, img, features) r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] # TODO @@ -366,7 +373,7 @@ class BYTETracker: output2 = [x.tlwh_to_tlbr(x._tlwh).tolist() + [x.track_id, x.score, x.cls, x.frame_id, 
x.idx] for x in first_finded if x.first_find] - output = np.asarray(output1+output2, dtype=np.float32) + output = np.asarray(output1 + output2, dtype=np.float32) return output @@ -382,7 +389,7 @@ class BYTETracker: tracks = [] feats = [] for t in self.tracked_stracks: - if t.is_activated: + if t.is_activated or t.first_find: track = t.tlbr.tolist() + [t.track_id, t.score, t.cls, t.idx] feat = t.curr_feature @@ -398,7 +405,7 @@ class BYTETracker: """Returns a Kalman filter object for tracking bounding boxes.""" return KalmanFilterXYAH() - def init_track(self, dets, scores, cls, img=None): + def init_track(self, dets, scores, cls, img=None, feats=None): """Initialize object tracking with detections and scores using STrack algorithm.""" return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections @@ -455,7 +462,22 @@ class BYTETracker: def remove_duplicate_stracks(stracksa, stracksb): """Remove duplicate stracks with non-maximum IOU distance.""" pdist = matching.iou_distance(stracksa, stracksb) - pairs = np.where(pdist < 0.15) + + #### ===================================== written by WQG + mlabel = [] + if len(stracksa) and len(stracksb): + alabel = np.array([int(stack.cls) for stack in stracksa]) + blabel = np.array([int(stack.cls) for stack in stracksb]) + amlabel = np.expand_dims(alabel, axis=1).repeat(len(stracksb),axis=1) + bmlabel = np.expand_dims(blabel, axis=0).repeat(len(stracksa),axis=0) + mlabel = bmlabel == amlabel + if len(mlabel): + condt = (pdist<0.15) & mlabel # 需满足iou足够小,且类别相同,才予以排除 + else: + condt = pdist<0.15 + + + pairs = np.where(condt) dupa, dupb = [], [] for p, q in zip(*pairs): timep = stracksa[p].frame_id - stracksa[p].start_frame diff --git a/tracking/trackers/reid/__pycache__/config.cpython-39.pyc b/tracking/trackers/reid/__pycache__/config.cpython-39.pyc index 437dbd0dbc2b8cd497d3fab03cdf726a0c108c5d..504c63b50f8a317d0f530e4d89ba67631738d87f 100644 GIT binary patch delta 20 ZcmZoe2e28;^IqV2Swmls z-rn!LdgnbO|aEA5U)|(Uqrf61h*!mxLHeQjp)w5>fJ(xk!p~ z!P-2bvI8E`zCR4&t|D!fUgLuy7gV`z-lj}&P|htXdb?D~_tvsB?UQa=c^xs2SkR~! zp&`F!ntLsDusK*njGKc~D3=i9#^5x{Gl;W@aZ|92GMVNaRx5~Ajrt^1^h2e3gJ6ro zd92BGMeZu%!$qxD;Zov7%8vUF0M@jE%ZLkzD~PLzf7b|2?cI?dvrqPBF};RB@2_#= z4FNnGxG>>pxWuiv#}cnTqtd<~@zD4GwA{ciB)`B4cUsDgIfUH?W6EUg-r+>;=XAo;c{_@V&D9J~3OE#1bd`uM=51Q2Pz0RG;Vo delta 687 zcmZXS&ui0Q7{~J_X_~givb7`qVUbR|8H%%ZqdJ`8+zy_GXk{1=F*a}4mNY3(f(I$+ z$%7ysUOWgpw0MxA7a88X?w@$Q`VV-~li>TbAc}$aljr?D-}lKkA%}&Ng5erQS`ycn z*AKcMJ{fNbX|ea@ryZM=>92&7NTM!I z&P*jDdD1<}O6`FvCPM!QyYq&myh45 z2ZUtVuf)R(CE?&o6a!>2#sY5Dp6hqH=8i@*@L&;jlRekW#T=G6lPCI7Kj=b@9cg7! 
diff --git a/tracking/trackers/reid/reid_interface.py b/tracking/trackers/reid/reid_interface.py
index 0ce0fe5..06aeca9 100644
--- a/tracking/trackers/reid/reid_interface.py
+++ b/tracking/trackers/reid/reid_interface.py
@@ -45,8 +45,7 @@ class ReIDInterface:
         ])
 
-        self.model = nn.DataParallel(model).to(self.device)
-
+        # self.model = nn.DataParallel(model).to(self.device)
         self.model = model
         self.model.load_state_dict(torch.load(self.model_path, map_location=self.device))
         self.model.eval()
diff --git a/tracking/utils/plotting.py b/tracking/utils/plotting.py
index 40921f6..b0b528b 100644
--- a/tracking/utils/plotting.py
+++ b/tracking/utils/plotting.py
@@ -1,4 +1,4 @@
-# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Ultralytics YOLO 🚀, AGPL-3.0 license
 
 import contextlib
 import math
@@ -284,5 +284,59 @@ def boxing_img(det, img, line_width=3):
 
     imgx = annotator.result()
     return imgx
+
+
+def draw_tracking_boxes(imgs, tracks, scale=2):
+    '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
+                 0   1   2   3      4       5     6        7           8
+       Key points:
+       (1) the order of imgs must match the frame ids (fid) stored in tracks;
+       (2) when the images were downscaled, xyxy has to be divided by the same
+           factor, which is what `scale` does below.
+    '''
+
+    def array2list(bboxes):
+        track_fids = np.unique(bboxes[:, 7].astype(int))
+        track_fids.sort()
+
+        lboxes = []
+        for f_id in track_fids:
+            idx = np.where(bboxes[:, 7] == f_id)[0]
+            box = bboxes[idx, :]
+            lboxes.append(box)
+
+            assert len(set(box[:, 4])) == len(box), "duplicate track_id within a single frame"
+
+        return lboxes
+
+    bboxes = array2list(tracks)
+
+    if len(bboxes) != len(imgs):
+        return []
+
+    subimgs = []
+    for i, boxes in enumerate(bboxes):
+        annotator = Annotator(imgs[i].copy())
+        for *xyxy, tid, conf, cls, fid, bid in boxes:
+            label = f'id:{int(tid)}_{int(cls)}_{conf:.2f}'
+
+            if cls == 0:
+                color = colors(int(cls), True)
+            elif tid > 0 and cls != 0:
+                color = colors(int(tid), True)
+            else:
+                color = colors(19, True)  # 19 is the last color in the palette
+
+            pt2 = [p / scale for p in xyxy]
+            annotator.box_label(pt2, label, color=color)
+
+        img = annotator.result()
+        subimgs.append(img)
+
+    return subimgs
diff --git a/tracking/utils/proBoxes.py b/tracking/utils/proBoxes.py
index 8f003b7..ee8ddaf 100644
--- a/tracking/utils/proBoxes.py
+++ b/tracking/utils/proBoxes.py
@@ -12,10 +12,12 @@ class Boxes:
         """Initialize the Boxes class."""
         if boxes.ndim == 1:
             boxes = boxes[None, :]
-        n = boxes.shape[-1]
-        assert n in (6, 7, 8), f'expected `n` in [6, 7], but got {n}'  # xyxyb, track_id, conf, cls
+        m, n = boxes.shape
+        assert n in (6, 7), f'expected `n` in [6, 7], but got {n}'  # xyxy, track_id, conf, cls
+
+        '''Number every box; the index is later used to look up the corresponding feature.'''
+        self.data = np.concatenate([boxes[:, :4], np.arange(m).reshape(-1, 1), boxes[:, 4:]], axis=-1)
 
-        self.data = boxes
         self.orig_shape = orig_shape
 
     def cpu(self):
@@ -30,10 +32,9 @@ class Boxes:
         """Return the boxes in xyxy format."""
         return self.data[:, :4]
 
-    @property
     def xyxyb(self):
-        """Return the boxes in xyxyb format."""
+        """Return the boxes in xyxyb format."""
         return self.data[:, :5]
 
     @property
diff --git a/tracking/utils/read_data.py b/tracking/utils/read_data.py
new file mode 100644
index 0000000..612c043
--- /dev/null
+++ b/tracking/utils/read_data.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jul  5 13:59:21 2024
+func: extract_data()
+    Reads the data written by each pipeline module; interface rework based on
+    read_pipeline_data.py (by 马晓慧).
+
+@author: ym
+"""
+import numpy as np
+import re
+import os
+
+
+def str_to_float_arr(s):
+    # strip a trailing comma, if present
+    if s.endswith(','):
+        s = s[:-1]
+
+    # split on commas and convert every element to float
+    float_array = [float(x) for x in s.split(",")]
+    return float_array
+
+
+def find_samebox_in_array(arr, target):
+    # two boxes are considered the same when their first four coordinates match
+    for i, st in enumerate(arr):
+        if st[:4] == target[:4]:
+            return i
+    return -1
+
+
+def extract_data(datapath):
+    bboxes, ffeats = [], []
+
+    trackerboxes = np.empty((0, 9), dtype=np.float64)
+    trackerfeats = np.empty((0, 256), dtype=np.float64)
+
+    boxes, feats, tboxes, tfeats = [], [], [], []
+    with open(datapath, 'r', encoding='utf-8') as lines:
+        for line in lines:
+            line = line.strip()  # strip the newline and any surrounding whitespace
+            if not line:  # skip empty lines
+                continue
+
+            if line.find("CameraId") >= 0:  # a new frame starts; flush the per-frame buffers
+                if len(boxes): bboxes.append(np.array(boxes))
+                if len(feats): ffeats.append(np.array(feats))
+                if len(tboxes):
+                    trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
+                if len(tfeats):
+                    trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
+
+                boxes, feats, tboxes, tfeats = [], [], [], []
+
+            if line.find("box:") >= 0 and line.find("output_box:") < 0:
+                box = line[line.find("box:") + 4:].strip()  # drop the 'box:' prefix and trim whitespace
+                boxes.append(str_to_float_arr(box))
+
+            if line.find("feat:") >= 0:
+                feat = line[line.find("feat:") + 5:].strip()  # drop the 'feat:' prefix and trim whitespace
+                feats.append(str_to_float_arr(feat))
+
+            if line.find("output_box:") >= 0:
+                box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())  # drop the 'output_box:' prefix
+                tboxes.append(box)
+                index = find_samebox_in_array(boxes, box)
+                if index >= 0:
+                    feat_f = feats[index]
+                    norm_f = np.linalg.norm(feat_f)
+                    feat_f = feat_f / norm_f  # L2-normalize the matched feature
+                    tfeats.append(feat_f)
+
+    if len(boxes): bboxes.append(np.array(boxes))
+    if len(feats): ffeats.append(np.array(feats))
+    if len(tboxes): trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
+    if len(tfeats): trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
+
+    assert(len(bboxes) == len(ffeats)), "Error at Yolo output!"
+    assert(len(trackerboxes) == len(trackerfeats)), "Error at tracker output!"
+
+    tracker_feat_dict = {}
+    for i in range(len(trackerboxes)):
+        tid, fid, bid = int(trackerboxes[i, 4]), int(trackerboxes[i, 7]), int(trackerboxes[i, 8])
+        if f"frame_{fid}" not in tracker_feat_dict:
+            tracker_feat_dict[f"frame_{fid}"] = {"feats": {}}
+        tracker_feat_dict[f"frame_{fid}"]["feats"].update({bid: trackerfeats[i, :]})
+
+    boxes, trackingboxes = [], []
+    tracking_flag = False
+    with open(datapath, 'r', encoding='utf-8') as lines:
+        for line in lines:
+            line = line.strip()  # strip the newline and any surrounding whitespace
+            if not line:  # skip empty lines
+                continue
+            if tracking_flag:
+                if line.find("tracking_") >= 0:
+                    tracking_flag = False
+                else:
+                    box = str_to_float_arr(line)
+                    boxes.append(box)
+            if line.find("tracking_") >= 0:
+                tracking_flag = True
+                if len(boxes):
+                    trackingboxes.append(np.array(boxes))
+                    boxes = []
+
+    if len(boxes):
+        trackingboxes.append(np.array(boxes))
+
+    tracking_feat_dict = {}
+    for i, boxes in enumerate(trackingboxes):
+        for box in boxes:
+            tid, fid, bid = int(box[4]), int(box[7]), int(box[8])
+            if f"track_{tid}" not in tracking_feat_dict:
+                tracking_feat_dict[f"track_{tid}"] = {"feats": {}}
+            tracking_feat_dict[f"track_{tid}"]["feats"].update({f"{fid}_{bid}": tracker_feat_dict[f"frame_{fid}"]["feats"][bid]})
+
+    return bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict
+
+
+def read_tracking_output(filepath):
+    boxes = []
+    feats = []
+    with open(filepath, 'r', encoding='utf-8') as file:
+        for line in file:
+            line = line.strip()  # strip the newline and any surrounding whitespace
+            if not line:
+                continue
+            if line.endswith(','):
+                line = line[:-1]
+
+            data = np.array([float(x) for x in line.split(",")])
+            if data.size == 9:    # a box row
+                boxes.append(data)
+            if data.size == 256:  # a feature row
+                feats.append(data)
+
+    return np.array(boxes), np.array(feats)
+
+
+def read_deletedBarcode_file(filePth):
+    with open(filePth, 'r', encoding='utf-8') as f:
+        lines = f.readlines()
+
+    split_flag, all_list = False, []
+    dict, barcode_list, similarity_list = {}, [], []
+
+    clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
+    for line in clean_lines:
+        stripped_line = line.strip()
+        if not stripped_line:  # an empty line closes the current record
+            if len(barcode_list): dict['barcode'] = barcode_list
+            if len(similarity_list): dict['similarity'] = similarity_list
+            if len(dict): all_list.append(dict)
+
+            split_flag = False
+            dict, barcode_list, similarity_list = {}, [], []
+            continue
+
+        label = line.split(':')[0]
+        value = line.split(':')[1]
+        if label == 'SeqDir':
+            dict['SeqDir'] = value
+        if label == 'Deleted':
+            dict['Deleted'] = value
+        if label == 'List':
+            split_flag = True
+            continue
+        if split_flag:  # inside the 'List' section: barcode on the left, similarity on the right
+            barcode_list.append(label)
+            similarity_list.append(value)
+
+    if len(barcode_list): dict['barcode'] = barcode_list
+    if len(similarity_list): dict['similarity'] = similarity_list
+    if len(dict): all_list.append(dict)
+    return all_list
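+
+
+# A minimal usage sketch (illustrative only): the directory layout and the file
+# names '1_track.data' / '1_tracking_output.data' are assumptions taken from
+# the debug paths used in this repository, not a fixed convention.
+def _demo_usage(seq_dir):
+    track_file = os.path.join(seq_dir, '1_track.data')
+    bboxes, ffeats, trackerboxes, tracker_feat_dict, trackingboxes, tracking_feat_dict = extract_data(track_file)
+    print(f"frames: {len(bboxes)}, tracker boxes: {trackerboxes.shape}, tracks: {len(trackingboxes)}")
+
+    out_file = os.path.join(seq_dir, '1_tracking_output.data')
+    if os.path.isfile(out_file):
+        boxes, feats = read_tracking_output(out_file)
+        print(f"tracking output: {boxes.shape} boxes, {feats.shape} features")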
+
+
+if __name__ == "__main__":
+    files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
+
+    # iterate over every entry in the directory
+    for filename in os.listdir(files_path):
+        filename = '1_track.data'  # NOTE: hard-coded for debugging, so the loop always parses this one file
+        file_path = os.path.join(files_path, filename)
+        if os.path.isfile(file_path) and filename.find("track.data") > 0:
+            extract_data(file_path)
+
+    print("Done")
diff --git a/tracking/utils/read_pipeline_data.py b/tracking/utils/read_pipeline_data.py
new file mode 100644
index 0000000..d0de65a
--- /dev/null
+++ b/tracking/utils/read_pipeline_data.py
@@ -0,0 +1,250 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Tue May 21 15:25:23 2024
+Reads the data written by each pipeline module; main code by 马晓慧.
+
+@author: ieemoo-zl003
+"""
+
+import os
+import numpy as np
+
+# replace with your own directory path
+files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
+
+def str_to_float_arr(s):
+    # strip a trailing comma, if present
+    if s.endswith(','):
+        s = s[:-1]
+
+    # split on commas and convert every element to float
+    float_array = np.array([float(x) for x in s.split(",")])
+    return float_array
+
+def extract_tracker_input_boxes_feats(file_name):
+    boxes = []
+    feats = []
+    with open(file_name, 'r', encoding='utf-8') as file:
+        for line in file:
+            line = line.strip()  # strip the newline and any surrounding whitespace
+
+            # skip empty lines
+            if not line:
+                continue
+
+            # check for a 'box:' or 'feat:' prefix
+            if line.find("box:") >= 0 and line.find("output_box:") < 0:
+                box = line[line.find("box:") + 4:].strip()
+                boxes.append(str_to_float_arr(box))  # drop the 'box:' prefix and trim whitespace
+
+            if line.find("feat:") >= 0:
+                feat = line[line.find("feat:") + 5:].strip()
+                feats.append(str_to_float_arr(feat))  # drop the 'feat:' prefix and trim whitespace
+
+    return np.array(boxes), np.array(feats)
+
+def find_string_in_array(arr, target):
+    """
+    Find the row (index) of the target string in a string array.
+
+    Args:
+        arr -- array of strings
+        target -- the string to look for
+
+    Returns:
+        The index of the target string in the array, or -1 if not found.
+    """
+    tg = [float(t) for k, t in enumerate(target.split(',')) if k < 4][:4]
+    for i, st in enumerate(arr):
+        st = [float(s) for k, s in enumerate(st.split(',')) if k < 4][:4]
+        if st == tg:
+            return i
+    return -1
+
+def find_samebox_in_array(arr, target):
+    # two boxes are considered the same when their first four coordinates match
+    for i, st in enumerate(arr):
+        if all(st[:4] == target[:4]):
+            return i
+    return -1
+
+
+def extract_tracker_output_boxes_feats(read_file_name):
+    input_boxes, input_feats = extract_tracker_input_boxes_feats(read_file_name)
+
+    boxes = []
+    feats = []
+    with open(read_file_name, 'r', encoding='utf-8') as file:
+        for line in file:
+            line = line.strip()  # strip the newline and any surrounding whitespace
+
+            # skip empty lines
+            if not line:
+                continue
+
+            # check for an 'output_box:' prefix
+            if line.find("output_box:") >= 0:
+                box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
+                boxes.append(box)  # drop the 'output_box:' prefix and trim whitespace
+                index = find_samebox_in_array(input_boxes, box)
+                if index >= 0:
+                    feat_f = input_feats[index]
+                    norm_f = np.linalg.norm(feat_f)
+                    feat_f = feat_f / norm_f  # L2-normalize the matched feature
+                    feats.append(feat_f)
+    return input_boxes, input_feats, np.array(boxes), np.array(feats)
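+
+# Note on the input format (inferred from the parsers above, not from a formal
+# spec): a *_track.data dump interleaves per-frame records along the lines of
+#     CameraId:0, TimeStamp:..., ...
+#     box: x1,y1,x2,y2,score,cls,
+#     feat: f0,f1,...,f255,
+#     output_box: x1,y1,x2,y2,track_id,score,cls,frame_index,box_index,
+# and an 'output_box' is matched back to its input 'box' (and therefore to its
+# feature vector) by comparing the first four coordinates.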
+def extract_tracking_output_boxes_feats(read_file_name):
+    tracker_boxes, tracker_feats, input_boxes, input_feats = extract_tracker_output_boxes_feats(read_file_name)
+    boxes = []
+    feats = []
+
+    tracking_flag = False
+    with open(read_file_name, 'r', encoding='utf-8') as file:
+        for line in file:
+            line = line.strip()  # strip the newline and any surrounding whitespace
+
+            # skip empty lines
+            if not line:
+                continue
+
+            if tracking_flag:
+                if line.find("tracking_") >= 0:
+                    tracking_flag = False
+                else:
+                    box = str_to_float_arr(line)
+                    boxes.append(box)
+                    index = find_samebox_in_array(input_boxes, box)
+                    if index >= 0:
+                        feats.append(input_feats[index])
+            # check for a 'tracking_' prefix
+            if line.find("tracking_") >= 0:
+                tracking_flag = True
+
+    assert(len(tracker_boxes) == len(tracker_feats)), "Error at Yolo output"
+    assert(len(input_boxes) == len(input_feats)), "Error at tracker output"
+    assert(len(boxes) == len(feats)), "Error at tracking output"
+
+    return tracker_boxes, tracker_feats, input_boxes, input_feats, np.array(boxes), np.array(feats)
+
+def read_tracking_input(datapath):
+    with open(datapath, 'r') as file:
+        lines = file.readlines()
+
+    data = []
+    for line in lines:
+        data.append([s for s in line.split(',') if len(s) >= 3])
+        # data.append([float(s) for s in line.split(',') if len(s) >= 3])
+
+    try:
+        data = np.array(data, dtype=np.float32)
+    except Exception as e:
+        data = np.array([], dtype=np.float32)
+        print('DataError for func: read_tracking_input()')
+
+    return data
+
+
+def read_tracker_input(datapath):
+    with open(datapath, 'r') as file:
+        lines = file.readlines()
+    Videos = []
+    FrameBoxes, FrameFeats = [], []
+    boxes, feats = [], []
+
+    timestamp = []
+    t1 = None
+    for line in lines:
+        if line.find('CameraId') >= 0:
+            t = int(line.split(',')[1].split(':')[1])
+            timestamp.append(t)
+
+            if len(boxes) and len(feats):
+                FrameBoxes.append(np.array(boxes, dtype=np.float32))
+                FrameFeats.append(np.array(feats, dtype=np.float32))
+                boxes, feats = [], []
+
+            # a gap of more than one second between timestamps starts a new video
+            if t1 and t - t1 > 1e3:
+                Videos.append((FrameBoxes, FrameFeats))
+                FrameBoxes, FrameFeats = [], []
+            t1 = int(line.split(',')[1].split(':')[1])
+
+        if line.find('box') >= 0:
+            box = line.split(':', )[1].split(',')[:-1]
+            boxes.append(box)
+
+        if line.find('feat') >= 0:
+            feat = line.split(':', )[1].split(',')[:-1]
+            feats.append(feat)
+
+    FrameBoxes.append(np.array(boxes, dtype=np.float32))
+    FrameFeats.append(np.array(feats, dtype=np.float32))
+    Videos.append((FrameBoxes, FrameFeats))
+
+    # TimeStamp = np.array(timestamp, dtype = np.int64)
+    # DimesDiff = np.diff((TimeStamp))
+    # sorted_indices = np.argsort(TimeStamp)
+    # TimeStamp_sorted = TimeStamp[sorted_indices]
+    # DimesDiff_sorted = np.diff((TimeStamp_sorted))
+
+    return Videos
+
+
+def main():
+    files_path = 'D:/contrast/dataset/1_to_n/709/20240709-112658_6903148351833/'
+
+    # iterate over every entry in the directory
+    for filename in os.listdir(files_path):
+        # build the full file path
+        file_path = os.path.join(files_path, filename)
+        if os.path.isfile(file_path) and filename.find("track.data") > 0:
+            tracker_boxes, tracker_feats, tracking_boxes, tracking_feats, output_boxes, output_feats = extract_tracking_output_boxes_feats(file_path)
+
+    print("Done")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/videos_select.py b/videos_select.py
new file mode 100644
index 0000000..d1f22e9
--- /dev/null
+++ b/videos_select.py
@@ -0,0 +1,641 @@
+# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+"""
+Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+Usage - sources:
+    $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                     img.jpg                         # image
+                                                     vid.mp4                         # video
+                                                     screen                          # screenshot
+                                                     path/                           # directory
+                                                     list.txt                        # list of images
+                                                     list.streams                    # list of streams
+                                                     'path/*.jpg'                    # glob
+                                                     'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                     'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+Usage - formats:
+    $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                 yolov5s.torchscript        # TorchScript
+                                 yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                 yolov5s_openvino_model     # OpenVINO
+                                 yolov5s.engine             # TensorRT
+                                 yolov5s.mlmodel            # CoreML (macOS-only)
+                                 yolov5s_saved_model        # TensorFlow SavedModel
+                                 yolov5s.pb                 # TensorFlow GraphDef
+                                 yolov5s.tflite             # TensorFlow Lite
+                                 yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                 yolov5s_paddle_model       # PaddlePaddle
+"""
+
+import argparse
+import csv
+import os
+import platform
+import sys
+from pathlib import Path
+import glob
+import numpy as np
+import pickle
+import torch
+
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+sys.path.append((str(ROOT) + '\\tracking\\utils'))
+
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+
+from models.common import DetectMultiBackend
+from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                           increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
+from utils.torch_utils import select_device, smart_inference_mode
+
+'''Integrates the tracking module and writes the tracking results to .npy files.'''
+# from ultralytics.engine.results import Boxes  # Results
+# from ultralytics.utils import IterableSimpleNamespace, yaml_load
+from tracking.utils.plotting import Annotator, colors
+from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load, boxes_add_fid
+from tracking.trackers import BOTSORT, BYTETracker
+from tracking.utils.showtrack import drawtracks
+from hands.hand_inference import hand_pose
+from tracking.dotrack.dotracks_back import doBackTracks
+from tracking.dotrack.dotracks_front import doFrontTracks
+
+from tracking.trackers.reid.reid_interface import ReIDInterface
+from tracking.trackers.reid.config import config as ReIDConfig
+ReIDEncoder = ReIDInterface(ReIDConfig)
+
+# tracker_yaml = r"./tracking/trackers/cfg/botsort.yaml"
+
+def inference_image(image, detections):
+    H, W, _ = np.shape(image)
+    imgs = []
+    batch_patches = []
+    patches = []
+    for d in range(np.size(detections, 0)):
+        tlbr = detections[d, :4].astype(np.int_)
+        tlbr[0] = max(0, tlbr[0])
+        tlbr[1] = max(0, tlbr[1])
+        tlbr[2] = min(W - 1, tlbr[2])
+        tlbr[3] = min(H - 1, tlbr[3])
+        img1 = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]
+
+        img = img1[:, :, ::-1].copy()  # the model expects RGB inputs
+        patch = ReIDEncoder.transform(img)
+
+        imgs.append(img1)
+        # patch = patch.to(device=self.device).half()
+        if str(ReIDEncoder.device) != "cpu":
+            patch = patch.to(device=ReIDEncoder.device).half()
+        else:
+            patch = patch.to(device=ReIDEncoder.device)
+
+        patches.append(patch)
+        if (d + 1) % ReIDEncoder.batch_size == 0:
+            patches = torch.stack(patches, dim=0)
+            batch_patches.append(patches)
+            patches = []
+
+    if len(patches):
+        patches = torch.stack(patches, dim=0)
+        batch_patches.append(patches)
+
+    features = np.zeros((0, ReIDEncoder.embedding_size))
+    for patches in batch_patches:
+        pred = ReIDEncoder.model(patches)
+        pred[torch.isinf(pred)] = 1.0  # clamp any inf activations before converting to numpy
+        feat = pred.cpu().data.numpy()
+        features = np.vstack((features, feat))
+
+    return imgs, features
+
+
+def init_trackers(tracker_yaml=None, bs=1):
+    """
+    Initialize trackers for object tracking during prediction.
+    """
+    # tracker_yaml = r"./tracking/trackers/cfg/botsort.yaml"
+
+    TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
+
+    cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml))
+    trackers = []
+    for _ in range(bs):
+        tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
+        trackers.append(tracker)
+
+    return trackers
+
+
+def detect_start_end(bboxes, features_dict, filename):
+    boxes = np.empty(shape=(0, 9), dtype=np.float64)
+    if filename.find("back") >= 0:
+        vts = doBackTracks(bboxes, features_dict)
+        vts.classify()
+        # vtx = [t for t in vts.tracks if t.cls != 0]
+        # for track in vtx:
+        for track in vts.Residual:
+            if track.moving_index.size:
+                boxes = np.concatenate((boxes, track.moving_index), axis=0)
+
+    elif filename.find("front") >= 0:
+        vts = doFrontTracks(bboxes, features_dict)
+        vts.classify()
+        # vtx = [t for t in vts.tracks if t.cls != 0]
+        # for track in vtx:
+        for track in vts.Residual:
+            for start, end in track.dynamic_y2:
+                boxes = np.concatenate((boxes, track.boxes[start:end+1, :]), axis=0)
+            for start, end in track.dynamic_y1:
+                boxes = np.concatenate((boxes, track.boxes[start:end+1, :]), axis=0)
+
+    if boxes.size > 0:
+        start = np.min(boxes[:, 7])
+        end = np.max(boxes[:, 7])
+    else:
+        start, end = 0, 0
+
+    return start, end
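+
+# Track rows produced by the tracker follow the 9-column layout
+#     [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index];
+# detect_start_end() gathers the moving boxes of the residual tracks and
+# returns the first/last frame index with activity, or (0, 0) when no
+# qualifying motion was found.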
+
+@smart_inference_mode()
+def run(
+        weights=ROOT / 'yolov5s.pt',  # model path or triton URL
+        source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+
+        project=ROOT / 'runs/detect',  # save results to project/name
+        name='exp',  # save results to project/name
+
+        tracker_yaml="./tracking/trackers/cfg/botsort.yaml",
+        imgsz=(640, 640),  # inference size (height, width)
+        conf_thres=0.25,  # confidence threshold
+        iou_thres=0.45,  # NMS IOU threshold
+        max_det=1000,  # maximum detections per image
+        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+        view_img=False,  # show results
+        save_txt=False,  # save results to *.txt
+        save_csv=False,  # save results in CSV format
+        save_conf=False,  # save confidences in --save-txt labels
+        save_crop=False,  # save cropped prediction boxes
+        nosave=False,  # do not save images/videos
+        classes=None,  # filter by class: --class 0, or --class 0 2 3
+        agnostic_nms=False,  # class-agnostic NMS
+        augment=False,  # augmented inference
+        visualize=False,  # visualize features
+        update=False,  # update all models
+        exist_ok=False,  # existing project/name ok, do not increment
+        line_thickness=3,  # bounding box thickness (pixels)
+        hide_labels=False,  # hide labels
+        hide_conf=False,  # hide confidences
+        half=False,  # use FP16 half-precision inference
+        dnn=False,  # use OpenCV DNN for ONNX inference
+        vid_stride=1,  # video frame-rate stride
+        data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+):
+    source = str(source)
+    # filename = os.path.split(source)[-1]
+
+    save_img = not nosave and not source.endswith('.txt')  # save inference images
+    is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+    is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+    webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+    screenshot = source.lower().startswith('screen')
+    if is_url and is_file:
+        source = check_file(source)  # download
+
+    save_dir = Path(project) / Path(source).stem
+    if save_dir.exists():
+        print(Path(source).stem)
+        # return
+
+        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+    else:
+        save_dir.mkdir(parents=True, exist_ok=True)
+
+    # Load model
+    device = select_device(device)
+    model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+    stride, names, pt = model.stride, model.names, model.pt
+    imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+    # Dataloader
+    bs = 1  # batch_size
+    dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+    vid_path, vid_writer = [None] * bs, [None] * bs
+
+    # Run inference
+    model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
+    seen, dt = 0, (Profile(), Profile(), Profile())
+
+    tracker = init_trackers(tracker_yaml, bs)[0]
+
+    handpose = hand_pose()
+    handlocals_dict = {}
+
+    boxes_and_imgs = []
+    BoxesFeats = []
+
+    track_boxes = np.empty((0, 9), dtype=np.float32)
+    det_boxes = np.empty((0, 9), dtype=np.float32)
+
+    DetBoxes = np.empty((0, 6), dtype=np.float32)
+    TrackerBoxes = np.empty((0, 9), dtype=np.float32)
+    TrackerFeats = np.empty((0, 256), dtype=np.float32)
+
+    images = []
+
+    features_dict = {}
+    TracksDict = {}
+    for path, im, im0s, vid_cap, s in dataset:
+        if save_img and 'imgshow' not in locals().keys():
+            imgshow = im0s.copy()
+
+        ## ============================= the tracking stage only handles video, written by WQG
+        if dataset.mode == 'image':
+            continue
+
+        with dt[0]:
+            im = torch.from_numpy(im).to(model.device)
+            im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+            im /= 255  # 0 - 255 to 0.0 - 1.0
+            if len(im.shape) == 3:
+                im = im[None]  # expand for batch dim
+
+        # Inference
+        with dt[1]:
+            visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+            pred = model(im, augment=augment, visualize=visualize)
+
+        # NMS
+        with dt[2]:
+            pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+
+        # Process predictions
+        for i, det in enumerate(pred):  # per image
+            seen += 1
+            if webcam:  # batch_size >= 1
+                p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                s += f'{i}: '
+            else:
+                p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+            images.append((dataset.frame, im0))
+
+            p = Path(p)  # to Path
+            save_path = str(save_dir / p.name)  # im.jpg
+            s += '%gx%g ' % im.shape[2:]  # print string
+
+            # im0_ant = im0.copy()
+            annotator = Annotator(im0.copy(), line_width=line_thickness, example=str(names))
+
+            nd = len(det)
+            if nd:
+                # Rescale boxes from img_size to im0 size
+                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+                det = det.cpu().numpy()
+                det = np.concatenate([det[:, :4], np.arange(nd).reshape(-1, 1), det[:, 4:]], axis=-1)
+
+                DetBoxes = np.concatenate([DetBoxes, det[:, :6]], axis=0)
+
+                ## ============= reuse features of boxes that stayed static across neighbouring frames
+                # def static_estimate(box1, box2, TH1=8, TH2=12):
+                #     dij_abs = max(np.abs(box1 - box2))
+                #     dij_euc = max([np.linalg.norm((box1[:2] - box2[:2])),
+                #                    np.linalg.norm((box1[2:4] - box2[2:4]))
+                #                    ])
+                #     if dij_abs < TH1 and dij_euc < TH2:
+                #         return True
+                #     else:
+                #         return False
+
+                # nw = 3                    # window size when checking backwards
+                # nf = len(BoxesFeats)      # number of frames already detected and feature-extracted
+                # feat_curr = [None] * nd   # nd: number of boxes detected in the current frame
+                # for ii in range(nd):
+                #     box = det[ii, :4]
+
+                #     kk = 1
+                #     feat = None
+                #     while kk <= nw and nf >= kk:
+                #         ki = -1 * kk
+                #         boxes_ = BoxesFeats[ki][0]
+                #         feats_ = BoxesFeats[ki][1]
+
+                #         flag = [jj for jj in range(len(boxes_)) if static_estimate(box, boxes_[jj, :4])]
+                #         if len(flag) == 1:
+                #             feat = feats_[flag[0]]
+                #             break
+                #         kk += 1
+                #     if feat is not None:
+                #         feat_curr[ii] = feat
+
+                ## ================================================================ written by WQG
+                '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
+                             0   1   2   3      4       5     6        7           8
+                   frame_index may equally be replaced by the video frame id; box_index stays unchanged
+                '''
+                det_tracking = Boxes(det, im0.shape).cpu().numpy()
+                tracks = tracker.update(det_tracking, im0)
+                if len(tracks) == 0:
+                    continue
+                tracks[:, 7] = dataset.frame
+
+                '''================== 1. store the dets/subimgs/features dict ============='''
+                imgs, features = inference_image(im0, tracks)
+
+                TrackerFeats = np.concatenate([TrackerFeats, features], axis=0)
+
+                imgdict = {}
+                boxdict = {}
+                featdict = {}
+                for ii, bid in enumerate(tracks[:, 8]):
+                    imgdict.update({int(bid): imgs[ii]})
+                    boxdict.update({int(bid): tracks[ii, :]})
+                    featdict.update({int(bid): features[ii, :]})
+                TracksDict[f"frame_{int(dataset.frame)}"] = {"imgs": imgdict, "boxes": boxdict, "feats": featdict}
+
+                track_boxes = np.concatenate([track_boxes, tracks], axis=0)
+
+                '''================== 2. extract hand positions ==================='''
+                # idx_0 = tracks[:, 6].astype(np.int_) == 0
+                # hn = 0
+                # for j, index in enumerate(idx_0):
+                #     if index:
+                #         track = tracks[j, :]
+                #         hand_local, imgshow = handpose.get_hand_local(track, im0)
+                #         handlocals_dict.update({int(track[7]): {int(track[8]): hand_local}})
+
+                #         # '''The recall of YOLOv5 and of the hand detector are not consistent; replacing the
+                #         #    hand (x1, y1, x2, y2) in tracks with hand_local would mix two coordinate conventions.'''
+                #         # if hand_local: tracks[j, :4] = hand_local
+
+                #         hn += 1
+                #         cv2.imwrite(f"D:\DeepLearning\yolov5\hands\images\{Path(source).stem}_{int(track[7])}_{hn}.png", imgshow)
+
+                for *xyxy, id, conf, cls, fid, bid in reversed(tracks):
+                    name = ('' if id == -1 else f'id:{int(id)} ') + names[int(cls)]
+                    label = None if hide_labels else (name if hide_conf else f'{name} {conf:.2f}')
+
+                    if id >= 0 and cls == 0:
+                        color = colors(int(cls), True)
+                    elif id >= 0 and cls != 0:
+                        color = colors(int(id), True)
+                    else:
+                        color = colors(19, True)  # 19 is the last color in the palette
+
+                    annotator.box_label(xyxy, label, color=color)
+
+            # Save results (image and video with tracking)
+            im0 = annotator.result()
+            save_path_img, ext = os.path.splitext(save_path)
+            if save_img:
+                if dataset.mode == 'image':
+                    imgpath = save_path_img + f"_{dataset}.png"
+                else:
+                    imgpath = save_path_img + f"_{dataset.frame}.png"
+
+                cv2.imwrite(str(imgpath), im0)  # cv2.imwrite expects a str path
+
+                if vid_path[i] != save_path:  # new video
+                    vid_path[i] = save_path
+                    if isinstance(vid_writer[i], cv2.VideoWriter):
+                        vid_writer[i].release()  # release previous video writer
+                    if vid_cap:  # video
+                        fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                        w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                        h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                    else:  # stream
+                        fps, w, h = 30, im0.shape[1], im0.shape[0]
+                    save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                    vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                vid_writer[i].write(im0)
+
+        # Print time (inference-only)
+        LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+    ## ======================================================================== written by WQG
+    ## track_boxes: Array, [x1, y1, x2, y2, track_id, score, cls, frame_index, box_id]
+
+    '''The detection results were saved above as video and images; below, five more kinds of data are saved.'''
+    filename = os.path.split(save_path_img)[-1]
+    file, ext = os.path.splitext(filename)
+
+    mfid = np.max(track_boxes[:, 7])
+    start, end = detect_start_end(track_boxes, features_dict, filename)
+
+    if start == end:  # no activity detected; nothing to export
+        return
+
+    # widen the detected interval by up to 5 frames on each side
+    if start > 5:
+        start = start - 5
+    else:
+        start = 0
+
+    if mfid - end > 5:
+        end = end + 5
+
+    # img_path = Path(f'./runs/images/{file}/')
+    img_path = savepath / f'{file}'  # NOTE: savepath is a module-level name set in the __main__ block
+    if not img_path.exists():
+        img_path.mkdir(parents=True, exist_ok=True)
+
+    '''frame-sampling interval'''
+    Interval = 3
+    for i, img in images:
+        if i >= start and i % Interval == 0:
+            imgpath = img_path / f'{file}_{int(i)}.png'
+            cv2.imwrite(str(imgpath), img)
+        if i == end:
+            break
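+
+    # Worked example of the widening and sampling above: with start=12, end=40
+    # and mfid=50, the window becomes [7, 45] and every frame index divisible
+    # by 3 inside it is written, i.e. frames 9, 12, ..., 45.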
+
+    '''======================== 3. save hand_local data =================='''
+    # handlocal_dir = Path('./tracking/data/handlocal/')
+    # if not handlocal_dir.exists():
+    #     handlocal_dir.mkdir(parents=True, exist_ok=True)
+    # handlocal_path = handlocal_dir.joinpath(f'{filename}.pkl')
+    # with open(handlocal_path, 'wb') as file:
+    #     pickle.dump(handlocals_dict, file)
+
+    # Print results
+    t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+    LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+    if save_txt or save_img:
+        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+        LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+    if update:
+        strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+def parse_opt():
+    modelpath = ROOT / 'ckpts/best_yolov5m_250000.pt'  # 'ckpts/best_15000_0908.pt', 'ckpts/yolov5s.pt', 'ckpts/best_20000_cls30.pt'
+
+    '''datapath is a single video file or a directory of videos'''
+    datapath = r"D:/datasets/ym/videos/标记视频/"  # ROOT/'data/videos', ROOT/'data/images'
+    # datapath = r"D:\datasets\ym\highvalue\videos"
+    # datapath = r"D:/dcheng/videos/"
+    # modelpath = ROOT / 'ckpts/yolov5s.pt'
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--weights', nargs='+', type=str, default=modelpath, help='model path or triton URL')
+    parser.add_argument('--source', type=str, default=datapath, help='file/dir/URL/glob/screen/0(webcam)')
+    parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+    parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+    parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+    parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+    parser.add_argument('--view-img', action='store_true', help='show results')
+    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+    parser.add_argument('--save-csv', action='store_true', help='save results in CSV format')
+    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+    parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+    parser.add_argument('--augment', action='store_true', help='augmented inference')
+    parser.add_argument('--visualize', action='store_true', help='visualize features')
+    parser.add_argument('--update', action='store_true', help='update all models')
+    parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
+    parser.add_argument('--name', default='exp', help='save results to project/name')
+    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+    parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+    parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+    parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+    parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+    parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+    parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+    opt = parser.parse_args()
+    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+    print_args(vars(opt))
+    return opt
+
+
+def find_files_in_nested_dirs(root_dir):
+    all_files = []
+    extensions = ['.mp4']
+    for dirpath, dirnames, filenames in os.walk(root_dir):
+        for filename in filenames:
+            file, ext = os.path.splitext(filename)
+            if ext in extensions:
+                all_files.append(os.path.join(dirpath, filename))
+    return all_files
+
+
+def main(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+
+    p = r"D:\datasets\ym\永辉测试数据_202404\20240402"
+
+    optdict = vars(opt)
+    files = []
+    k = 0
+    if os.path.isdir(p):
+        files.extend(sorted(glob.glob(os.path.join(p, '*.*'))))
+        for file in files:
+            optdict["source"] = file
+            run(**optdict)
+
+            k += 1
+            if k == 2:  # debug guard: stop after two videos
+                break
+    elif os.path.isfile(p):
+        run(**vars(opt))
+
+
+def main_loop(opt):
+    check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+
+    optdict = vars(opt)
+
+    # p = r"D:\datasets\ym\永辉测试数据_比对"
+    p = r"D:\datasets\ym\广告板遮挡测试\8"
+    # p = r"D:\datasets\ym\videos\标记视频"
+    # p = r"D:\datasets\ym\实验室测试"
+    # p = r"D:\datasets\ym\永辉双摄视频\新建文件夹"
+
+    k = 0
+    if os.path.isdir(p):
+        files = find_files_in_nested_dirs(p)
+
+        # files = [r"D:\datasets\ym\广告板遮挡测试\8\6926636301004_20240508-175300_back_addGood_70f754088050_215_17327712807.mp4",
+        #          r"D:\datasets\ym\videos\标记视频\test_20240402-173935_6920152400975_back_174037372.mp4",
+        #          r"D:\datasets\ym\videos\标记视频\test_20240402-173935_6920152400975_front_174037379.mp4",
+        #          r"D:\datasets\ym\广告板遮挡测试\8\2500441577966_20240508-175946_front_addGood_70f75407b7ae_155_17788571404.mp4"
+        #          ]
files = [r"D:\datasets\ym\广告板遮挡测试\8\6907149227609_20240508-174733_back_returnGood_70f754088050_425_17327712807.mp4"] + + + for file in files: + optdict["source"] = file + run(**optdict) + + k += 1 + if k == 1: + break + elif os.path.isfile(p): + optdict["source"] = p + run(**vars(opt)) + + + + +if __name__ == '__main__': + opt = parse_opt() + # main(opt) + savepath = ROOT / 'runs/images' + main_loop(opt) + + + + + + + + + + + + + + + + + + + + + + + + + + +