From 5ecc1285d49ab7c9a2d236c71087e2213f67ca3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E5=BA=86=E5=88=9A?=
Date: Mon, 4 Nov 2024 18:06:52 +0800
Subject: [PATCH] update

---
 __pycache__/event_time_specify.cpython-39.pyc |  Bin 0 -> 6293 bytes
 __pycache__/imgs_inference.cpython-39.pyc     |  Bin 0 -> 12367 bytes
 __pycache__/move_detect.cpython-39.pyc        |  Bin 0 -> 6246 bytes
 __pycache__/track_reid.cpython-39.pyc         |  Bin 16691 -> 17208 bytes
 contrast/__pycache__/__init__.cpython-39.pyc  |  Bin 0 -> 191 bytes
 .../__pycache__/one2n_contrast.cpython-39.pyc |  Bin 9186 -> 9971 bytes
 contrast/genfeats.py                          |  209 ++++++++
 contrast/one2n_contrast.py                    |  229 ++++-----
 contrast/one2one_contrast.py                  |  276 ++++++-----
 contrast/one2one_onsite.py                    |   13 +-
 .../utils/__pycache__/__init__.cpython-39.pyc |  Bin 0 -> 197 bytes
 .../utils/__pycache__/tools.cpython-39.pyc    |  Bin 0 -> 1684 bytes
 contrast/utils/tools.py                       |   56 +++
 event_time_specify.py                         |  301 ++++++++++++
 imgs_inference.py                             |  435 +++++++++++++++++
 move_detect.py                                |  243 ++++++++++
 pipeline.py                                   |  167 ++++---
 pipeline_extract_subimg.py                    |    2 +
 time_devide.py                                |  451 ++++++++++++++++++
 track_reid.py                                 |   85 ++--
 .../__pycache__/dotracks.cpython-39.pyc       |  Bin 13782 -> 15442 bytes
 .../__pycache__/dotracks_back.cpython-39.pyc  |  Bin 6064 -> 6632 bytes
 .../__pycache__/dotracks_front.cpython-39.pyc |  Bin 4874 -> 5345 bytes
 .../__pycache__/track_back.cpython-39.pyc     |  Bin 6390 -> 6390 bytes
 .../__pycache__/track_front.cpython-39.pyc    |  Bin 4358 -> 4455 bytes
 tracking/dotrack/dotracks.py                  |   96 +++-
 tracking/dotrack/dotracks_back.py             |   27 +-
 tracking/dotrack/dotracks_front.py            |   14 +
 tracking/dotrack/track_back.py                |    2 +-
 tracking/dotrack/track_front.py               |   11 +-
 tracking/module_analysis.py                   |   92 ++--
 tracking/tracking_test.py                     |   18 +-
 .../__pycache__/drawtracks.cpython-39.pyc     |  Bin 9006 -> 9024 bytes
 .../__pycache__/read_data.cpython-39.pyc      |  Bin 7829 -> 10023 bytes
 tracking/utils/drawtracks.py                  |    8 +-
 tracking/utils/read_data.py                   |  133 +++++-
 utils/__pycache__/dataloaders.cpython-39.pyc  |  Bin 43431 -> 43426 bytes
 utils/__pycache__/getsource.cpython-39.pyc    |  Bin 0 -> 1740 bytes
 utils/dataloaders.py                          |    2 +
 utils/getsource.py                            |   60 +++
 说明文档.txt                                  |   62 ++-
 41 files changed, 2552 insertions(+), 440 deletions(-)
 create mode 100644 __pycache__/event_time_specify.cpython-39.pyc
 create mode 100644 __pycache__/imgs_inference.cpython-39.pyc
 create mode 100644 __pycache__/move_detect.cpython-39.pyc
 create mode 100644 contrast/__pycache__/__init__.cpython-39.pyc
 create mode 100644 contrast/genfeats.py
 create mode 100644 contrast/utils/__pycache__/__init__.cpython-39.pyc
 create mode 100644 contrast/utils/__pycache__/tools.cpython-39.pyc
 create mode 100644 contrast/utils/tools.py
 create mode 100644 event_time_specify.py
 create mode 100644 imgs_inference.py
 create mode 100644 move_detect.py
 create mode 100644 time_devide.py
 create mode 100644 utils/__pycache__/getsource.cpython-39.pyc
 create mode 100644 utils/getsource.py

diff --git a/__pycache__/event_time_specify.cpython-39.pyc b/__pycache__/event_time_specify.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f810d6490fcd8da36b92059c382e225dd7b902a8
GIT binary patch
literal 6293
(binary data omitted)

(binary data omitted)

diff --git a/__pycache__/track_reid.cpython-39.pyc b/__pycache__/track_reid.cpython-39.pyc
index 2532102649ab96099343efa466d2ebb9707e318b..c565e610257314fffd4bdb229253c7b00cda1a61 100644
GIT binary patch
delta 6059
(binary data omitted)

delta 5585
(binary data omitted)

diff --git a/contrast/__pycache__/__init__.cpython-39.pyc b/contrast/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c772700a8ff41a63e1c4beb552566d5475c04ac
GIT binary patch
literal 191
(binary data omitted; the same applies to the remaining __pycache__ binary hunks)
yeQ&A4YDMdDf)O-dBqw?88^Tu&d4eLZ#pqPIt_A z3VnewO)z2jUG-w6SnsZMt8ddU)q5&E_1;Qvy|2=zcot;(EB&bFgndb?tP^=rK)GIY zh)$FPqDvG}4vKD3LOCRQL@&x=afj#^>n^d%1~Dqu?+^p1j)*}qgvO0xSZqMKNsNe% zDDQAb#ilb{{>|Mqr(r7ov{v4njIci>i>%D_q{R2N%Pj2^(m7uZrBgfOH72X$%^5eS zaoX(CW1uwgK8iQE3KDAbEYcQOoYfd=EadY#HqRIMNgnf9k946AX{U`y6NWH_bwy`d z#+x^VF8CxTb+wmRmdMd3zPRcHEw1SV-r7!?)Vw8Z>J!qo@<(sdif=LyN#y1ZkklOa zv8ysqjd8CLq-wcD) znDPSpcEjq7@72>h)j}`y-LfHzG+{T*rB8U1sqV}K*EBhlJZ|o2+rd=x{2_E6L3_{- zqOk%i@Dl51y{yO;)#V28YdWqD9Tv|A}0L$epS$C^-VI3-=|))&Vw@UgK;jHYRA?eF{1H}885Hwb5a z7y9*EQ%>2Ey=YD?Og1gu)F3sdmhVow4NN+-G|8m&1Qa|#%;$|WJcBiFab{3hzvN0GWtj- zS0URyJ3?GYDY*@BDe?j0ggD|kI8}t<2ehmO(t3o2Oz1ef+)-ucGLJRi+zC=6vtNG5 z862{sKD@yo$fOo&r%Ex4*h|_OQyLL_m7g`9UZcxvJmp)rrFvt!evPFZN2=7UG+fx@ z!;WjsdTZaq80=HL+J!C25BXrSU~X;ye-p@U=p|{bt0T+%@%a^E?E=Ahmr$fOY$j+q zH5j7rHd4!JwO||Y(jwfp8hCZjccd52roAqM=S_p%Wtsr~w|_<6o_)#R>mxgG43d2krZt4{{i zZMW?wM!VdVylg%@wrZ)5fVt9R`)xg*&w8x0$`8s1QBQTR5vIoJrq@XI+PNJxk#rp) zcaxyfQO=Q*K~sjZ2dr{uCAnbjwku}MsUJR2N#3_M-bZtnxjaB!9wVZTvX{nLg?rk- zc*1i#*@^4Bat7zpPWI%-+RCs}t*pyusOu`(i_g#4nB0%DT+CK$2BGv?@(4*DB=R^& zYU9j;^BxT85Pe!+FyZ;Ge1ZhNN@Qg-@@w>|HnI-R?s&2zwa1;nZ8_montYl+&eG&i zVec>oX-oiSQbRbi+X2n>V7nxUOp_lMHrz!!Ue#>cI|-z&H5-U+L0F}+g8AgHg~P0V z{qr5K=^f-dlJ+C_Cx0wf%5|!e`sE2CYA3YxY{`U?lCVM^Ne*{!{^FCUR&rkbJS@>I zm*un6R0Tn-aL%cvONV&lL~7UH=>8KMCQO%KCvu#~ngu5pdp5AYByaXK+X0#m-U3r( zqcpz?2_Zb4fNA+RVV$%n^R!G=9v6IbgEIhRR2<=h~$Pw?aaQT(!3FxQE86yh@Pk;YWECjac|TcVbH5g z_C_4T=A**IGJ5i;ppep-=gS>|rK6FV$wj>rDi@0^d}orm+awvroeHfe7a4Q>q#pOh z{qedm7xhK`(YpRQ3Xpcz2tYOwZ*$30eNP^u<0>=xEfgy-5Uf2!G$=%S2uGw4?ZUk+ zS57o#-D%X^s(a4yr=8FRX!a+c^gZ#Y8W8by!f!gE8dsj9G1l0{@{s$Nhy09f$)Nw6 z)MO_Y`*+;CD*xZQA5`5*xc$F{4=vAC&S#5Kp1qsqQ|M%R4dKqDdfk~(7^&i_N@U9G zQyV!$mBK{@9#4@u%@azKR|l2;TC=TEuIjiWYfa%QN%>_A4ZK*EDiO@zBJdCiSi#b` zfqPbXGPQ*3yP-=^p~ea--K((yj$dz8N^2Ms5>QYvg<_>V2;?m7C<3W;*PXC7<%;E5 zSK~((PF8iMK(zlqo$8mMwSe@a@sVU~{asHNSP97Tx^YYV$%8F-K z@DUS0qbZQzp@qj*n2#(RR1OJS0T_|<)PzXrIwEzMIeTQKrJXfFP#BT1 zB&g4KE)dKb&hZ9~a_|uoxAh4uwuY<+vCEc%!bUIHv=GyXQ;7411`@ zTiTB@Tq&j+kpXdA2h}SrxW+S^Rbtm0--m{}` zO~7kFyQx_}122=OP|rd;MRj?Yh>GE6GJEHSHd*u<5V(&%6-eYL3L?iZmw;~+e3kLY zV-Tp=k2vxPKtxcn#tA9{`4rfxO$maE`M9}(y~eZ%02EBz=9z7}KZHy9l=4z#VO=X0 z<~N(I8M3Z>F@nYnTnGwyOT2)bZ;%(6jkgnTQCVDxcj96u6Uf%6!}5a>U@J>DM+OL+ zBVX-+v*K<|*D0Qy9Nmi>wd#!GyO6pK$y1~5c#XGdISK@r!Q^WYyJaeJLAsSG^QJU` zrV>160ymNiB?5?uMI45ppU@gR3@v1{N0}DmE_h*hEaYTkWEHfq6WTLaTx?u48ilOf zrftiXYaX564THERdHL>B?F*fcN!gU6$7fsaL!-$0$m!DjV>30kMTU}^Es2yA0P!Lk zRGOoXGwr&?+f(M=zGIViy!|J%WpyFLmhs~p+AcEr22EpEAE=U4wOVEe>2GtIt5!v` zR;{MFX0TNm&q0#3f~o1y^HUR-l-8^)CGUK0Q(IjpC8cAj9?S+R+f=z_WnlTg8e6Fs z2Et7<#6LMCout&M$KY)uDmxZV=tw%C3=RpOv&@g~>RGU(t3j3~wiQrP&X(aOX-=7d zI@#qNRvS*;tyb4)XVqBkJ@uxT_T7E7tbp#QMu!c%v=K8hO8+wGH0%<7_t^XF`|WCB NOJNXo!|s6CzX3~7VPpUR diff --git a/contrast/genfeats.py b/contrast/genfeats.py new file mode 100644 index 0000000..651c9da --- /dev/null +++ b/contrast/genfeats.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +""" +Created on Sun Nov 3 12:05:19 2024 + +@author: ym +""" +import os +import time +import torch +import pickle +import numpy as np +from config import config as conf +from model import resnet18 as resnet18 +from feat_inference import inference_image + + +IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png'] + +'''======= 0. 
+'''======= 0. Configure the path of the feature-extraction model ======='''
+model_path = conf.test_model
+model_path = r"D:\exhibition\ckpt\zhanting.pth"   # hard-coded override of conf.test_model
+
+##============ load resnet model
+model = resnet18().to(conf.device)
+# model = nn.DataParallel(model).to(conf.device)
+model.load_state_dict(torch.load(model_path, map_location=conf.device))
+model.eval()
+print('load model {} '.format(conf.testbackbone))
+
+def get_std_barcodeDict(bcdpath, savepath):
+    '''
+    inputs:
+        bcdpath: cleaned barcode sample images; if a barcode folder contains a
+                'base' subfolder, only the images under 'base' are used
+                (default = r'\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_1771')
+        savepath: storage folder of the dicts; file name format: barcode.pickle
+    function:
+        build and save, per barcode, a single-key dict {barcode: [imgpath1, imgpath2, ...]}
+    '''
+
+    # savepath = r'\\192.168.1.28\share\测试_202406\contrast\std_barcodes'
+
+    '''read the barcode list of the dataset'''
+    stdBarcodeList = []
+    for filename in os.listdir(bcdpath):
+        filepath = os.path.join(bcdpath, filename)
+        # if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8:
+        #     continue
+        stdBarcodeList.append(filename)
+
+    bcdPaths = [(barcode, os.path.join(bcdpath, barcode)) for barcode in stdBarcodeList]
+
+    '''for every barcode in the dataset, build and save the dict {barcode: [imgpath1, imgpath2, ...]}'''
+    k = 0
+    errbarcodes = []
+    for barcode, bpath in bcdPaths:
+        pickpath = os.path.join(savepath, f"{barcode}.pickle")
+        if os.path.isfile(pickpath):
+            continue
+
+        stdBarcodeDict = {}
+        stdBarcodeDict[barcode] = []
+        for root, dirs, files in os.walk(bpath):
+            imgpaths = []
+            if "base" in dirs:
+                broot = os.path.join(root, "base")
+                for imgname in os.listdir(broot):
+                    imgpath = os.path.join(broot, imgname)
+                    file, ext = os.path.splitext(imgpath)
+
+                    if ext not in IMG_FORMAT:
+                        continue
+                    imgpaths.append(imgpath)
+
+                stdBarcodeDict[barcode].extend(imgpaths)
+                break
+
+            else:
+                for imgname in files:
+                    imgpath = os.path.join(root, imgname)
+                    _, ext = os.path.splitext(imgpath)
+                    if ext not in IMG_FORMAT: continue
+                    imgpaths.append(imgpath)
+                stdBarcodeDict[barcode].extend(imgpaths)
+
+        pickpath = os.path.join(savepath, f"{barcode}.pickle")
+        with open(pickpath, 'wb') as f:
+            pickle.dump(stdBarcodeDict, f)
+        print(f"Barcode: {barcode}")
+
+        # k += 1
+        # if k == 10:
+        #     break
+    print(f"Len of errbarcodes: {len(errbarcodes)}")
+    return
+
+
+
+def stdfeat_infer(imgPath, featPath, bcdSet=None):
+    '''
+    inputs:
+        imgPath: folder of pickle files, each of the form {barcode: [imgpath1, imgpath2, ...]}
+        featPath: storage folder for the features of the images in imgPath
+    function:
+        extract features for the images in imgPath and save, per barcode, a
+        single-key dict {barcode: features} with features.shape = (nsample, 256)
+        into featPath
+    '''
+
+    # imgPath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes"
+    # featPath = r"\\192.168.1.28\share\测试_202406\contrast\std_features"
+    stdBarcodeDict = {}
+    stdBarcodeDict_ft16 = {}
+
+
+    '''the barcode name is reused in 4 places: (1) the folder of original barcode
+       images; (2) the .pickle file name under imgPath and the dict key inside it;
+       likewise for the output feature pickle and its "barcode" field'''
+
+    k = 0
+    for filename in os.listdir(imgPath):
+        bcd, ext = os.path.splitext(filename)
+        pkpath = os.path.join(featPath, f"{bcd}.pickle")
+
+        if os.path.isfile(pkpath): continue
+        if bcdSet is not None and bcd not in bcdSet:
+            continue
+
+        filepath = os.path.join(imgPath, filename)
+
+        stdbDict = {}
+        stdbDict_ft16 = {}
+        stdbDict_uint8 = {}
+
+        t1 = time.time()
+
+        try:
+            with open(filepath, 'rb') as f:
+                bpDict = pickle.load(f)
+                for barcode, imgpaths in bpDict.items():
+                    # feature = batch_inference(imgpaths, 8)   # from the vit distilled model of LiChen
+                    feature = inference_image(imgpaths, conf.test_transform, model, conf.device)
+                    feature /= np.linalg.norm(feature, axis=1)[:, None]
+
+                    # float16
+                    feature_ft16 = feature.astype(np.float16)
+                    feature_ft16 /= np.linalg.norm(feature_ft16, axis=1)[:, None]
+
+                    # int8; two possible schemes: 1) small precision loss, 2) low compute cost
+                    # feature_uint8, _ = ft16_to_uint8(feature_ft16)
+                    # note: a component equal to 1.0 makes *128 overflow int8; clip if that matters
+                    feature_uint8 = (feature_ft16*128).astype(np.int8)
+
+        except Exception as e:
+            print(f"Error occurred at: {filename}, with exception: {e}")
+
+        '''================ save the features of a single barcode ================'''
+        # each pickle holds exactly one barcode, so the loop variables above are
+        # still bound here; if inference failed, `feature` may be undefined
+        ##================== float32
+        stdbDict["barcode"] = barcode
+        stdbDict["imgpaths"] = imgpaths
+        stdbDict["feats_ft32"] = feature
+        stdbDict["feats_ft16"] = feature_ft16
+        stdbDict["feats_uint8"] = feature_uint8
+
+        with open(pkpath, 'wb') as f:
+            pickle.dump(stdbDict, f)
+
+        stdBarcodeDict[barcode] = feature
+        stdBarcodeDict_ft16[barcode] = feature_ft16
+
+        t2 = time.time()
+        print(f"Barcode: {barcode}, need time: {t2-t1:.1f} secs")
+        # k += 1
+        # if k == 10:
+        #     break
+
+    ##================== float32
+    # pickpath = os.path.join(featPath, f"barcode_features_{k}.pickle")
+    # with open(pickpath, 'wb') as f:
+    #     pickle.dump(stdBarcodeDict, f)
+
+    ##================== float16
+    # pickpath_ft16 = os.path.join(featPath, f"barcode_features_ft16_{k}.pickle")
+    # with open(pickpath_ft16, 'wb') as f:
+    #     pickle.dump(stdBarcodeDict_ft16, f)
+
+    return
+
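+# Illustrative shapes of the two pickles produced above (paths and the barcode
+# are placeholders, not real samples):
+#
+#   get_std_barcodeDict() writes, per barcode,
+#       {"6901234567890": [r"...\6901234567890\base\0.jpg", r"...\base\1.jpg"]}
+#
+#   stdfeat_infer() then writes, per barcode,
+#       {"barcode":     "6901234567890",
+#        "imgpaths":    [...],                  # the image list above
+#        "feats_ft32":  ndarray(nsample, 256),  # L2-normalized float32
+#        "feats_ft16":  ndarray(nsample, 256),  # float16 copy, re-normalized
+#        "feats_uint8": ndarray(nsample, 256)}  # int8 quantization of feats_ft16
+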
+def genfeatures(imgpath, bcdpath, featpath):
+
+    get_std_barcodeDict(imgpath, bcdpath)
+    stdfeat_infer(bcdpath, featpath, bcdSet=None)
+
+    print(f"Features have been generated and saved in: {featpath}")
+
+
+def main():
+    imgpath = r"\\192.168.1.28\share\展厅barcode数据\整理\zhantingBase"
+    bcdpath = r"D:\exhibition\dataset\bcdpath"
+    featpath = r"D:\exhibition\dataset\feats"
+
+    genfeatures(imgpath, bcdpath, featpath)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/contrast/one2n_contrast.py b/contrast/one2n_contrast.py
index 8b7a6db..0c3ae93 100644
--- a/contrast/one2n_contrast.py
+++ b/contrast/one2n_contrast.py
@@ -16,60 +16,13 @@ import shutil
 import numpy as np
 import matplotlib.pyplot as plt
 import cv2
-
+from pathlib import Path
 import sys
 sys.path.append(r"D:\DetectTracking")
 from tracking.utils.plotting import Annotator, colors
-from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output
+from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output, read_returnGoods_file
 from tracking.utils.plotting import draw_tracking_boxes
-
-
-
-def showHist(err, correct):
-    err = np.array(err)
-    correct = np.array(correct)
-
-    fig, axs = plt.subplots(2, 1)
-    axs[0].hist(err, bins=50, edgecolor='black')
-    axs[0].set_xlim([0, 1])
-    axs[0].set_title('err')
-
-    axs[1].hist(correct, bins=50, edgecolor='black')
-    axs[1].set_xlim([0, 1])
-    axs[1].set_title('correct')
-    # plt.show()
-
-    return plt
-
-def show_recall_prec(recall, prec, ths):
-    # x = np.linspace(start=-0, stop=1, num=11, endpoint=True).tolist()
-    fig = plt.figure(figsize=(10, 6))
-    plt.plot(ths, recall, color='red', label='recall')
-    plt.plot(ths, prec, color='blue', label='PrecisePos')
-    plt.legend()
-    plt.xlabel(f'threshold')
-    # plt.ylabel('Similarity')
-    plt.grid(True, linestyle='--', alpha=0.5)
-    # plt.savefig('accuracy_recall_grid.png')
-    # plt.show()
-    # plt.close()
-
-    return plt
-
-
-def compute_recall_precision(err_similarity, correct_similarity):
-    ths = np.linspace(0, 1, 51)
-    recall, prec = [], []
-    for th in ths:
-        TP = len([num for num in correct_similarity if num >= th])
-        FP = len([num for num in err_similarity if num >= th])
-        if (TP+FP) == 0:
-            prec.append(1)
-            recall.append(0)
-        else:
-            prec.append(TP / (TP + FP))
-            recall.append(TP / (len(err_similarity) + 
len(correct_similarity))) - return recall, prec, ths +from contrast.utils.tools import showHist, show_recall_prec, compute_recall_precision # ============================================================================= @@ -129,7 +82,7 @@ def read_tracking_imgs(imgspath): return imgs_0, imgs_1 - + # ============================================================================= # def draw_tracking_boxes(imgs, tracks): # '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index] @@ -287,27 +240,9 @@ def save_tracking_imgpairs(pair, basepath, savepath): cv2.imwrite(imgpath, img) -# def performance_evaluate(all_list, isshow=False): - -# corrpairs, correct_barcode_list, correct_similarity, errpairs, err_barcode_list, err_similarity = [], [], [], [], [], [] -# for s_list in all_list: -# seqdir = s_list['SeqDir'].strip() -# delete = s_list['Deleted'].strip() -# barcodes = [s.strip() for s in s_list['barcode']] -# similarity = [float(s.strip()) for s in s_list['similarity']] - -# if delete in barcodes[:1]: -# corrpairs.append((seqdir, delete)) -# correct_barcode_list.append(delete) -# correct_similarity.append(similarity[0]) -# else: -# errpairs.append((seqdir, delete, barcodes[0])) -# err_barcode_list.append(delete) -# err_similarity.append(similarity[0]) -def performance_evaluate(all_list, isshow=False): - - corrpairs, correct_barcode_list, correct_similarity, errpairs, err_barcode_list, err_similarity = [], [], [], [], [], [] +def one2n_old(all_list): + corrpairs, errpairs, correct_similarity, err_similarity = [], [], [], [] for s_list in all_list: seqdir = s_list['SeqDir'].strip() delete = s_list['Deleted'].strip() @@ -332,70 +267,136 @@ def performance_evaluate(all_list, isshow=False): matched_barcode = barcodes[index] if matched_barcode == delete: corrpairs.append((seqdir, delete)) - correct_barcode_list.append(delete) correct_similarity.append(max(similarity)) else: errpairs.append((seqdir, delete, matched_barcode)) - err_barcode_list.append(delete) err_similarity.append(max(similarity)) - '''3. 
计算比对性能 '''
-    if isshow:
-        recall, prec, ths = compute_recall_precision(err_similarity, correct_similarity)
-        show_recall_prec(recall, prec, ths)
-        showHist(err_similarity, correct_similarity)
-    return errpairs, corrpairs, err_similarity, correct_similarity
-
+    return errpairs, corrpairs, err_similarity, correct_similarity
+
+
+def one2n_new(all_list):
+    corrpairs, correct_similarity, errpairs, err_similarity = [], [], [], []
+    for s_list in all_list:
+        seqdir = s_list['SeqDir'].strip()
+        delete = s_list['Deleted'].strip()
+        barcodes = [s.strip() for s in s_list['barcode']]
+        events = [s.strip() for s in s_list['event']]
+        types = [s.strip() for s in s_list['type']]
+
+        ## =================== read the similarity values
+        similarity_comp, similarity_front = [], []
+        for simil in s_list['similarity']:
+            ss = [float(s.strip()) for s in simil.split(',')]
+
+            similarity_comp.append(ss[0])
+            if len(ss)==3:
+                similarity_front.append(ss[2])
+
+        # prefer the front-camera similarities whenever they are present
+        if len(similarity_front):
+            similarity = [s for s in similarity_front]
+        else:
+            similarity = [s for s in similarity_comp]
+
+        index = similarity.index(max(similarity))
+        matched_barcode = barcodes[index]
+        if matched_barcode == delete:
+            corrpairs.append((seqdir, events[index]))
+            correct_similarity.append(max(similarity))
+        else:
+            # among the events whose name ends with the deleted barcode,
+            # keep the most similar one as the missed ground-truth event
+            idx = [i for i, name in enumerate(events) if name.split('_')[-1] == delete]
+            idxmax, simimax = -1, -1
+            for k in idx:
+                if similarity[k] > simimax:
+                    idxmax = k
+                    simimax = similarity[k]
+
+            errpairs.append((seqdir, events[idxmax], events[index]))
+            err_similarity.append(max(similarity))
+
+    return errpairs, corrpairs, err_similarity, correct_similarity
+
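+# Illustrative entry layouts for the two evaluators above (every value here is
+# made up; the meaning of the comma-separated scores is an assumption read off
+# the indexing above, where ss[0] is the combined score and ss[2] the
+# front-camera score):
+#
+#   one2n_old() -- deletedBarcode.txt entry:
+#       {'SeqDir': '20241101-100000_abc', 'Deleted': '6901234567890',
+#        'barcode': ['6901234567890', '6922255451427'],
+#        'similarity': ['0.82', '0.35']}
+#
+#   one2n_new() -- returnGoods.txt entry:
+#       {'SeqDir': '20241101-100000_abc', 'Deleted': '6901234567890',
+#        'barcode': [...], 'event': ['20241101-..._6901234567890', ...],
+#        'type': [...], 'similarity': ['0.82,0.79,0.88', '0.35,0.30,0.41']}
+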
+# def contrast_analysis(del_barcode_file, basepath, savepath, saveimgs=False):
+def get_relative_paths(del_barcode_file, basepath, savepath, saveimgs=False):
     '''
-    del_barcode_file: 测试数据文件,利用该文件进行算法性能分析
+    del_barcode_file:
+        a 1:n result file in deletedBarcode.txt format
+        returnGoods.txt-format files do not need this function; the elements of
+        the errpairs returned by one2n_old() are 3-tuples (takeout, put-back, mis-match)
     '''
+    relative_paths = []
     '''1. 读取 deletedBarcode 文件 '''
     all_list = read_deletedBarcode_file(del_barcode_file)
-
+    
     '''2. 算法性能评估,并输出 (取出,删除, 错误匹配) 对 '''
-    errpairs, corrpairs, _, _ = performance_evaluate(all_list)
-    
-    '''3. 获取 (取出,删除, 错误匹配) 对应路径,保存相应轨迹图像'''
-    relative_paths = []
+    errpairs, corrpairs, _, _ = one2n_old(all_list)
+
+    '''3. Build the event paths for each (takeout, put-back & deleted, mis-matched) triple '''
     for errpair in errpairs:
         GetoutPath, InputPath, ErrorPath = get_contrast_paths(errpair, basepath)
         relative_paths.append((GetoutPath, InputPath, ErrorPath))
-    
+
+    '''4. Save the corresponding trajectory images'''
     if saveimgs:
         save_tracking_imgpairs(errpair, basepath, savepath)
 
     return relative_paths
 

-def contrast_loop(fpath):
+def one2n_test():
+    fpath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\other'
+    fpath = r'\\192.168.1.28\share\测试_202406\1030\images'
 
     savepath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\illustration'
-    # savepath = r'D:\contrast\dataset\1_to_n\illustration'
     if not os.path.exists(savepath):
         os.mkdir(savepath)
-    
-    if os.path.isfile(fpath):
-        fpath, filename = os.path.split(fpath)
+    if os.path.isdir(fpath):
+        filepaths = [os.path.join(fpath, f) for f in os.listdir(fpath)
+                     if f.find('.txt')>0
+                     and (f.find('deletedBarcode')>=0 or f.find('returnGoods')>=0)]
+    elif os.path.isfile(fpath):
+        filepaths = [fpath]
+    else:
+        return
+
+    FileFormat = {}
     BarLists, blists = {}, []
-    for filename in os.listdir(fpath):
-        file = os.path.splitext(filename)[0][15:]
-        
-        filepath = os.path.join(fpath, filename)
-        blist = read_deletedBarcode_file(filepath)
+    for pth in filepaths:
+        file = str(Path(pth).stem)
+        if file.find('deletedBarcode')>=0:
+            FileFormat[file] = 'deletedBarcode'
+            blist = read_deletedBarcode_file(pth)
+        elif file.find('returnGoods')>=0:
+            FileFormat[file] = 'returnGoods'
+            blist = read_returnGoods_file(pth)
+        else:
+            return
         blists.extend(blist)
 
         BarLists.update({file: blist})
     BarLists.update({"Total": blists})
-    for file, blist in BarLists.items():
-        errpairs, corrpairs, err_similarity, correct_similarity = performance_evaluate(blist)
+
+    for file, blist in BarLists.items():
+        # use .get(): the aggregated "Total" entry has no key in FileFormat
+        if FileFormat.get(file) == 'deletedBarcode':
+            _, _, err_similarity, correct_similarity = one2n_old(blist)
+        elif FileFormat.get(file) == 'returnGoods':
+            _, _, err_similarity, correct_similarity = one2n_new(blist)
+        else:
+            # "Total" mixes both formats; the old parser only handles deletedBarcode-style entries
+            _, _, err_similarity, correct_similarity = one2n_old(blist)
+
         recall, prec, ths = compute_recall_precision(err_similarity, correct_similarity)
 
 
         # plt.close()
 
 
-def main():
-    fpath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\other'
-    contrast_loop(fpath)
 
-def main1():
+
+def test_getreltpath():
+    '''
+    applies to deletedBarcode.txt; not applicable to returnGoods.txt
+    '''
+
     del_barcode_file = r'\\192.168.1.28\share\测试_202406\709\deletedBarcode.txt'
     basepath = r'\\192.168.1.28\share\测试_202406\709'
-    savepath = r'D:\contrast\dataset\result'
+    # del_barcode_file = r'\\192.168.1.28\share\测试_202406\1030\images\returnGoods.txt'
+    # basepath = r'\\192.168.1.28\share\测试_202406\1030\images'
+
+    savepath = r'D:\contrast\dataset\result'
+    saveimgs = True
     try:
-        relative_path = contrast_analysis(del_barcode_file, basepath, savepath)
+        relative_path = get_relative_paths(del_barcode_file, basepath, savepath, saveimgs)
     except Exception as e:
         print(f'Error Type: {e}')
-    
+
 if __name__ == '__main__':
-    main()
-    # main1()
+    one2n_test()
+
+    # test_getreltpath()
+
diff --git a/contrast/one2one_contrast.py b/contrast/one2one_contrast.py
index 8730da9..27f4b20 100644
--- a/contrast/one2one_contrast.py
+++ b/contrast/one2one_contrast.py
@@ -239,71 +239,71 @@ def creat_shopping_event(eventPath, subimgPath=False):
     return event
 
-def get_std_barcodeDict(bcdpath, savepath):
-    '''
-    inputs:
-        bcdpath: 
已清洗的barcode样本图像,如果barcode下有'base'文件夹,只选用该文件夹下图像 +# (default = r'\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_1771') +# 功能: +# 生成并保存只有一个key值的字典 {barcode: [imgpath1, imgpath1, ...]}, +# savepath: 字典存储地址,文件名格式:barcode.pickle +# ''' - # savepath = r'\\192.168.1.28\share\测试_202406\contrast\std_barcodes' +# # savepath = r'\\192.168.1.28\share\测试_202406\contrast\std_barcodes' - '''读取数据集中 barcode 列表''' - stdBarcodeList = [] - for filename in os.listdir(bcdpath): - filepath = os.path.join(bcdpath, filename) - # if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8: - # continue - stdBarcodeList.append(filename) +# '''读取数据集中 barcode 列表''' +# stdBarcodeList = [] +# for filename in os.listdir(bcdpath): +# filepath = os.path.join(bcdpath, filename) +# # if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8: +# # continue +# stdBarcodeList.append(filename) - bcdPaths = [(barcode, os.path.join(bcdpath, barcode)) for barcode in stdBarcodeList] +# bcdPaths = [(barcode, os.path.join(bcdpath, barcode)) for barcode in stdBarcodeList] - '''遍历数据集,针对每一个barcode,生成并保存字典{barcode: [imgpath1, imgpath1, ...]}''' - k = 0 - errbarcodes = [] - for barcode, bpath in bcdPaths: - pickpath = os.path.join(savepath, f"{barcode}.pickle") - if os.path.isfile(pickpath): - continue +# '''遍历数据集,针对每一个barcode,生成并保存字典{barcode: [imgpath1, imgpath1, ...]}''' +# k = 0 +# errbarcodes = [] +# for barcode, bpath in bcdPaths: +# pickpath = os.path.join(savepath, f"{barcode}.pickle") +# if os.path.isfile(pickpath): +# continue - stdBarcodeDict = {} - stdBarcodeDict[barcode] = [] - for root, dirs, files in os.walk(bpath): - imgpaths = [] - if "base" in dirs: - broot = os.path.join(root, "base") - for imgname in os.listdir(broot): - imgpath = os.path.join(broot, imgname) - file, ext = os.path.splitext(imgpath) +# stdBarcodeDict = {} +# stdBarcodeDict[barcode] = [] +# for root, dirs, files in os.walk(bpath): +# imgpaths = [] +# if "base" in dirs: +# broot = os.path.join(root, "base") +# for imgname in os.listdir(broot): +# imgpath = os.path.join(broot, imgname) +# file, ext = os.path.splitext(imgpath) - if ext not in IMG_FORMAT: - continue - imgpaths.append(imgpath) +# if ext not in IMG_FORMAT: +# continue +# imgpaths.append(imgpath) - stdBarcodeDict[barcode].extend(imgpaths) - break +# stdBarcodeDict[barcode].extend(imgpaths) +# break - else: - for imgname in files: - imgpath = os.path.join(root, imgname) - _, ext = os.path.splitext(imgpath) - if ext not in IMG_FORMAT: continue - imgpaths.append(imgpath) - stdBarcodeDict[barcode].extend(imgpaths) +# else: +# for imgname in files: +# imgpath = os.path.join(root, imgname) +# _, ext = os.path.splitext(imgpath) +# if ext not in IMG_FORMAT: continue +# imgpaths.append(imgpath) +# stdBarcodeDict[barcode].extend(imgpaths) - pickpath = os.path.join(savepath, f"{barcode}.pickle") - with open(pickpath, 'wb') as f: - pickle.dump(stdBarcodeDict, f) - print(f"Barcode: {barcode}") +# pickpath = os.path.join(savepath, f"{barcode}.pickle") +# with open(pickpath, 'wb') as f: +# pickle.dump(stdBarcodeDict, f) +# print(f"Barcode: {barcode}") - # k += 1 - # if k == 10: - # break - print(f"Len of errbarcodes: {len(errbarcodes)}") - return +# # k += 1 +# # if k == 10: +# # break +# print(f"Len of errbarcodes: {len(errbarcodes)}") +# return def save_event_subimg(event, savepath): ''' @@ -355,92 +355,92 @@ def batch_inference(imgpaths, batch): features = np.concatenate(features, axis=0) return features -def stdfeat_infer(imgPath, featPath, bcdSet=None): - ''' - inputs: 
- imgPath: 该文件夹下的 pickle 文件格式 {barcode: [imgpath1, imgpath1, ...]} - featPath: imgPath图像对应特征的存储地址 - 功能: - 对 imgPath中图像进行特征提取,生成只有一个key值的字典, - {barcode: features},features.shape=(nsample, 256),并保存至 featPath 中 +# def stdfeat_infer(imgPath, featPath, bcdSet=None): +# ''' +# inputs: +# imgPath: 该文件夹下的 pickle 文件格式 {barcode: [imgpath1, imgpath1, ...]} +# featPath: imgPath图像对应特征的存储地址 +# 功能: +# 对 imgPath中图像进行特征提取,生成只有一个key值的字典, +# {barcode: features},features.shape=(nsample, 256),并保存至 featPath 中 - ''' +# ''' - # imgPath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes" - # featPath = r"\\192.168.1.28\share\测试_202406\contrast\std_features" - stdBarcodeDict = {} - stdBarcodeDict_ft16 = {} +# # imgPath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes" +# # featPath = r"\\192.168.1.28\share\测试_202406\contrast\std_features" +# stdBarcodeDict = {} +# stdBarcodeDict_ft16 = {} - '''4处同名: (1)barcode原始图像文件夹; (2)imgPath中的 .pickle 文件名、该pickle文件中字典的key值''' +# '''4处同名: (1)barcode原始图像文件夹; (2)imgPath中的 .pickle 文件名、该pickle文件中字典的key值''' - k = 0 - for filename in os.listdir(imgPath): - bcd, ext = os.path.splitext(filename) - pkpath = os.path.join(featPath, f"{bcd}.pickle") +# k = 0 +# for filename in os.listdir(imgPath): +# bcd, ext = os.path.splitext(filename) +# pkpath = os.path.join(featPath, f"{bcd}.pickle") - if os.path.isfile(pkpath): continue - if bcdSet is not None and bcd not in bcdSet: - continue +# if os.path.isfile(pkpath): continue +# if bcdSet is not None and bcd not in bcdSet: +# continue - filepath = os.path.join(imgPath, filename) +# filepath = os.path.join(imgPath, filename) - stdbDict = {} - stdbDict_ft16 = {} - stdbDict_uint8 = {} +# stdbDict = {} +# stdbDict_ft16 = {} +# stdbDict_uint8 = {} - t1 = time.time() +# t1 = time.time() - try: - with open(filepath, 'rb') as f: - bpDict = pickle.load(f) - for barcode, imgpaths in bpDict.items(): - # feature = batch_inference(imgpaths, 8) #from vit distilled model of LiChen - feature = inference_image(imgpaths, conf.test_transform, model, conf.device) - feature /= np.linalg.norm(feature, axis=1)[:, None] +# try: +# with open(filepath, 'rb') as f: +# bpDict = pickle.load(f) +# for barcode, imgpaths in bpDict.items(): +# # feature = batch_inference(imgpaths, 8) #from vit distilled model of LiChen +# feature = inference_image(imgpaths, conf.test_transform, model, conf.device) +# feature /= np.linalg.norm(feature, axis=1)[:, None] - # float16 - feature_ft16 = feature.astype(np.float16) - feature_ft16 /= np.linalg.norm(feature_ft16, axis=1)[:, None] +# # float16 +# feature_ft16 = feature.astype(np.float16) +# feature_ft16 /= np.linalg.norm(feature_ft16, axis=1)[:, None] - # uint8, 两种策略,1) 精度损失小, 2) 计算复杂度小 - # feature_uint8, _ = ft16_to_uint8(feature_ft16) - feature_uint8 = (feature_ft16*128).astype(np.int8) +# # uint8, 两种策略,1) 精度损失小, 2) 计算复杂度小 +# # feature_uint8, _ = ft16_to_uint8(feature_ft16) +# feature_uint8 = (feature_ft16*128).astype(np.int8) - except Exception as e: - print(f"Error accured at: {filename}, with Exception is: {e}") +# except Exception as e: +# print(f"Error accured at: {filename}, with Exception is: {e}") - '''================ 保存单个barcode特征 ================''' - ##================== float32 - stdbDict["barcode"] = barcode - stdbDict["imgpaths"] = imgpaths - stdbDict["feats_ft32"] = feature - stdbDict["feats_ft16"] = feature_ft16 - stdbDict["feats_uint8"] = feature_uint8 +# '''================ 保存单个barcode特征 ================''' +# ##================== float32 +# stdbDict["barcode"] = barcode +# stdbDict["imgpaths"] = imgpaths 
+#            stdbDict["feats_ft32"] = feature
+#            stdbDict["feats_ft16"] = feature_ft16
+#            stdbDict["feats_uint8"] = feature_uint8
 
-        with open(pkpath, 'wb') as f:
-            pickle.dump(stdbDict, f)
+#        with open(pkpath, 'wb') as f:
+#            pickle.dump(stdbDict, f)
 
-        stdBarcodeDict[barcode] = feature
-        stdBarcodeDict_ft16[barcode] = feature_ft16
+#        stdBarcodeDict[barcode] = feature
+#        stdBarcodeDict_ft16[barcode] = feature_ft16
 
-        t2 = time.time()
-        print(f"Barcode: {barcode}, need time: {t2-t1:.1f} secs")
-        # k += 1
-        # if k == 10:
-        #     break
+#        t2 = time.time()
+#        print(f"Barcode: {barcode}, need time: {t2-t1:.1f} secs")
+#        # k += 1
+#        # if k == 10:
+#        #     break
 
-    ##================== float32
-    # pickpath = os.path.join(featPath, f"barcode_features_{k}.pickle")
-    # with open(pickpath, 'wb') as f:
-    #     pickle.dump(stdBarcodeDict, f)
+#    ##================== float32
+#    # pickpath = os.path.join(featPath, f"barcode_features_{k}.pickle")
+#    # with open(pickpath, 'wb') as f:
+#    #     pickle.dump(stdBarcodeDict, f)
 
-    ##================== float16
-    # pickpath_ft16 = os.path.join(featPath, f"barcode_features_ft16_{k}.pickle")
-    # with open(pickpath_ft16, 'wb') as f:
-    #     pickle.dump(stdBarcodeDict_ft16, f)
+#    ##================== float16
+#    # pickpath_ft16 = os.path.join(featPath, f"barcode_features_ft16_{k}.pickle")
+#    # with open(pickpath_ft16, 'wb') as f:
+#    #     pickle.dump(stdBarcodeDict_ft16, f)
 
-    return
+#    return
 
 
 def contrast_performance_evaluate(resultPath):
@@ -789,30 +789,28 @@ def main():
         compute_precise_recall(pickpath)
 
-def main_std():
-    std_sample_path = r"\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_500_2192_已清洗"
-    std_barcode_path = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192"
-    std_feature_path = r"\\192.168.1.28\share\测试_202406\contrast\std_features_2192_ft32vsft16"
+# def main_std():
+#     std_sample_path = r"\\192.168.1.28\share\已标注数据备份\对比数据\barcode\barcode_500_2192_已清洗"
+#     std_barcode_path = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192"
+#     std_feature_path = r"\\192.168.1.28\share\测试_202406\contrast\std_features_2192_ft32vsft16"
 
-    get_std_barcodeDict(std_sample_path, std_barcode_path)
-    stdfeat_infer(std_barcode_path, std_feature_path, bcdSet=None)
+#     get_std_barcodeDict(std_sample_path, std_barcode_path)
+#     stdfeat_infer(std_barcode_path, std_feature_path, bcdSet=None)
 
-    # fileList = []
-    # for filename in os.listdir(std_barcode_path):
-    #     filepath = os.path.join(std_barcode_path, filename)
-    #     with open(filepath, 'rb') as f:
-    #         bpDict = pickle.load(f)
+#     # fileList = []
+#     # for filename in os.listdir(std_barcode_path):
+#     #     filepath = os.path.join(std_barcode_path, filename)
+#     #     with open(filepath, 'rb') as f:
+#     #         bpDict = pickle.load(f)
 
-    #     for v in bpDict.values():
-    #         fileList.append(len(v))
-    #     print("done")
+#     #     for v in bpDict.values():
+#     #         fileList.append(len(v))
+#     #     print("done")
 
 
 if __name__ == '__main__':
-    # main()
-    
-    
-    main_std()
+    main()
 
+    # main_std()
 
diff --git a/contrast/one2one_onsite.py b/contrast/one2one_onsite.py
index af5f6f5..a11769e 100644
--- a/contrast/one2one_onsite.py
+++ b/contrast/one2one_onsite.py
@@ -1,9 +1,8 @@
 # -*- coding: utf-8 -*-
 """
 Created on Wed Sep 11 11:57:30 2024
-
-永辉现场 1:1 比对测试
-
+    1:1 performance evaluation on the output data of the on-site trials at Yonghui;
+    applies to the data-saving format used before 2024-10 and requires OneToOneCompare.txt
 @author: ym
 """
 import os
@@ -65,14 +64,14 @@ def plot_pr_curve(matrix):
 
     axs[1].set_title(f'Cross Barcode, Num: {TPFN_mean}')
     # plt.savefig(f'./result/{file}_hist.png')    # svg, png, pdf
 
-    Recall_Pos = []
+    Recall_Neg = []
     Thresh = np.linspace(-0.2, 1, 100)
     for th in Thresh:
         TN = np.sum(simimax < th)
-        Recall_Pos.append(TN/TPFN_max)
+        Recall_Neg.append(TN/TPFN_max)
 
     fig, ax = plt.subplots()
-    ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
+    ax.plot(Thresh, Recall_Neg, 'b', label='Recall_Neg: TN/TPFN')
     ax.set_xlim([0, 1])
     ax.set_ylim([0, 1])
     ax.grid(True)
@@ -96,9 +95,7 @@ def main():
     simiList = []
     for fp in filepaths:
         slist = read_one2one_data(fp)
-
-        simiList.extend(slist)
-
+        simiList.extend(slist)
     plot_pr_curve(simiList)
 
diff --git a/contrast/utils/__pycache__/__init__.cpython-39.pyc b/contrast/utils/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7e60b079e9cfd398f1240177cfe675f59c8d79c
GIT binary patch
literal 197
(binary data omitted)

diff --git a/contrast/utils/__pycache__/tools.cpython-39.pyc b/contrast/utils/__pycache__/tools.cpython-39.pyc
new file mode 100644
GIT binary patch
literal 1684
(binary data omitted)

diff --git a/contrast/utils/tools.py b/contrast/utils/tools.py
new file mode 100644
index 0000000..7abdb5e
--- /dev/null
+++ b/contrast/utils/tools.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Oct 31 15:17:01 2024
+
+@author: ym
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def showHist(err, correct):
+    '''Histograms of the similarity values of wrong and correct matches.'''
+    err = np.array(err)
+    correct = np.array(correct)
+
+    fig, axs = plt.subplots(2, 1)
+    axs[0].hist(err, bins=50, edgecolor='black')
+    axs[0].set_xlim([0, 1])
+    axs[0].set_title('err')
+
+    axs[1].hist(correct, bins=50, edgecolor='black')
+    axs[1].set_xlim([0, 1])
+    axs[1].set_title('correct')
+    # plt.show()
+
+    return plt
+
+
+def show_recall_prec(recall, prec, ths):
+    '''Plot recall and precision against the matching threshold.'''
+    # x = np.linspace(start=-0, stop=1, num=11, endpoint=True).tolist()
+    fig = plt.figure(figsize=(10, 6))
+    plt.plot(ths, recall, color='red', label='recall')
+    plt.plot(ths, prec, color='blue', label='PrecisePos')
+    plt.legend()
+    plt.xlabel('threshold')
+    # plt.ylabel('Similarity')
+    plt.grid(True, linestyle='--', alpha=0.5)
+    # plt.savefig('accuracy_recall_grid.png')
+    # plt.show()
+    # plt.close()
+
+    return plt
+
+
+def compute_recall_precision(err_similarity, correct_similarity):
+    '''Sweep thresholds; an event counts as predicted positive when its similarity >= th.'''
+    ths = np.linspace(0, 1, 51)
+    recall, prec = [], []
+    for th in ths:
+        TP = len([num for num in correct_similarity if num >= th])
+        FP = len([num for num in err_similarity if num >= th])
+        if (TP+FP) == 0:
+            prec.append(1)
+            recall.append(0)
+        else:
+            prec.append(TP / (TP + FP))
+            # the denominator is the total event count (correct + err), so this
+            # "recall" is the fraction of all events correctly matched at th
+            recall.append(TP / (len(err_similarity) + len(correct_similarity)))
+    return recall, prec, ths
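+
+# Minimal usage sketch (numbers are made up): pass the similarity lists of the
+# wrongly and the correctly matched events, then render the curves.
+#
+#     err  = [0.31, 0.55, 0.72]
+#     corr = [0.66, 0.81, 0.93]
+#     recall, prec, ths = compute_recall_precision(err, corr)
+#     show_recall_prec(recall, prec, ths).show()
+#     showHist(err, corr).show()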
diff --git a/event_time_specify.py b/event_time_specify.py
new file mode 100644
index 0000000..8a421da
--- /dev/null
+++ b/event_time_specify.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Oct 10 11:01:39 2024
+
+@author: ym
+"""
+import os
+import numpy as np
+# from matplotlib.pylab import mpl
+# mpl.use('Qt5Agg')
+import matplotlib.pyplot as plt
+from move_detect import MoveDetect
+
+import sys
+sys.path.append(r"D:\DetectTracking")
+
+# from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output, read_weight_timeConsuming
+from tracking.utils.read_data import read_weight_timeConsuming
+
+
+def str_to_float_arr(s):
+    # strip a trailing comma, if present
+    if s.endswith(','):
+        s = s[:-1]
+
+    # split the string and convert every element to float
+    float_array = [float(x) for x in s.split(",")]
+    return float_array
+
+
+def find_samebox_in_array(arr, target):
+    # two boxes are the same when their first four values (x1, y1, x2, y2) match
+    for i, st in enumerate(arr):
+        if st[:4] == target[:4]:
+            return i
+    return -1
+
+
+def array2frame(bboxes):
+    # group the boxes by frame id (column 7) and pair every frame with its timestamp (column 9)
+    frameID = np.sort(np.unique(bboxes[:, 7].astype(int)))
+    fboxes, ttamps = [], []
+    for fid in frameID:
+        idx = np.where(bboxes[:, 7] == fid)[0]
+        box = bboxes[idx, :]
+
+        fboxes.append(box)
+        ttamps.append(int(box[0, 9]))
+
+    frameTstamp = np.concatenate((frameID[:,None], np.array(ttamps)[:,None]), axis=1)
+
+    return fboxes, frameTstamp
+
+
+def extract_data_1(datapath):
+    '''
+    Every frame (including the last one) must be followed by a blank line;
+    that separator line marks the end of the frame block.
+    '''
+    trackerboxes = np.empty((0, 10), dtype=np.float64)
+    trackerfeats = np.empty((0, 256), dtype=np.float64)
+
+    boxes, feats, tboxes, tfeats = [], [], [], []
+    timestamp = -1
+
+    newframe = False
+    with open(datapath, 'r', encoding='utf-8') as lines:
+        for line in lines:
+            if line.find("CameraId")>=0:
+                newframe = True
+                timestamp, frameId = [int(ln.split(":")[1]) for ln in line.split(",")[1:]]
+
+            if line.find("box:") >= 0 and line.find("output_box:") < 0:
+                line = line.strip()
+                box = line[line.find("box:") + 4:].strip()
+                boxes.append(str_to_float_arr(box))
+
+            if line.find("feat:") >= 0:
+                line = line.strip()
+                feat = line[line.find("feat:") + 5:].strip()
+                feats.append(str_to_float_arr(feat))
+
+            if line.find("output_box:") >= 0:
+                line = line.strip()
+                # keep boxes and feats strictly one-to-one, which in turn keeps tboxes and tfeats aligned
+                if len(boxes)==0 or len(boxes)!=len(feats):
+                    continue
+
+                # strip 'output_box:' and any surrounding whitespace
+                box = str_to_float_arr(line[line.find("output_box:") + 11:].strip())
+
+                box.append(timestamp)
+                index = find_samebox_in_array(boxes, box)
+                if index >= 0:
+                    tboxes.append(box)
+
+                    feat_f = feats[index]
+                    norm_f = np.linalg.norm(feat_f)
+                    feat_f = feat_f / norm_f
+                    tfeats.append(feat_f)
+
+            '''separator-line (blank line) check'''
+            condt = line.find("timestamp")<0 and line.find("box:")<0 and line.find("feat:")<0
+            if condt and newframe:
+                if len(tboxes) and len(tfeats):
+                    trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
+                    trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
+
+                timestamp = -1
+                boxes, feats, tboxes, tfeats = [], [], [], []
+                newframe = False
+
+    return trackerboxes, trackerfeats
+
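+# Sketch of one frame block in a *_track.data file as parsed above (values are
+# placeholders and the exact field lists are assumptions inferred from the
+# indexing in this module). A blank separator line must follow every frame,
+# including the last one:
+#
+#     CameraId:1,timestamp:1724123456789,frameId:12
+#     box: x1, y1, x2, y2, track_id, score, ...
+#     feat: f0, f1, ..., f255
+#     output_box: x1, y1, x2, y2, track_id, score, cls, frame_index, box_index
+#     <blank line>
+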
+            if track.cls == 0:
+                continue
+
+            f1, f2 = track.during
+
+            idx1 = set(np.where(frameState[:,0] >= f1)[0])
+            idx2 = set(np.where(frameState[:,0] <= f2)[0])
+            idx3 = list(idx1.intersection(idx2))
+
+            if track.tid not in mtrackFid:
+                mtrackFid[track.tid] = set(idx3)
+            else:
+                mtrackFid[track.tid] = mtrackFid[track.tid].union(set(idx3))
+
+            frameState[idx-1, 3] = 1
+            frameState[idx3, 2] = 1
+
+    '''Append one column per moving track, marking the frames it covers'''
+    for tid, fid in mtrackFid.items():
+        fstate = np.zeros((fnum, 1), dtype=np.int64)
+        fstate[list(fid), 0] = tid
+
+        frameState = np.concatenate((frameState, fstate), axis = 1).astype(np.int64)
+
+    return frameState
+
+
+def state_measure(periods, weights, spath=None):
+    '''Two states, static and motion, expressed as slices (t0, t1):
+        t0: static ----> motion
+        t1: motion ----> static
+    '''
+
+    camtype_0, frstate_0 = periods[0]
+    camtype_1, frstate_1 = periods[1]
+
+    '''Overall time range: tmin, tmax'''
+    tmin_w, tmax_w = np.min(weights[:, 0]), np.max(weights[:, 0])
+    tmin_0, tmax_0 = np.min(frstate_0[:, 1]), np.max(frstate_0[:, 1])
+    tmin_1, tmax_1 = np.min(frstate_1[:, 1]), np.max(frstate_1[:, 1])
+
+    tmin = min([tmin_w, tmin_0, tmin_1])
+    tmax = max([tmax_w, tmax_0, tmax_1])
+
+    fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
+
+    ax1.plot(weights[:, 0] - tmin, weights[:, 1], 'bo-', linewidth=1, markersize=4)
+    ax1.set_title('Weight (g)')
+
+    ax2.plot(frstate_0[:, 1] - tmin, frstate_0[:, 2], 'rx-', linewidth=1, markersize=8)
+    ax2.plot(frstate_0[:, 1] - tmin, frstate_0[:, 3], 'bo-', linewidth=1, markersize=4)
+    ax2.set_title(f'Camera: {int(camtype_0)}')
+
+    ax3.plot(frstate_1[:, 1] - tmin, frstate_1[:, 2], 'rx-', linewidth=1, markersize=8)
+    ax3.plot(frstate_1[:, 1] - tmin, frstate_1[:, 3], 'bo-', linewidth=1, markersize=4)
+    ax3.set_title(f'Camera: {int(camtype_1)}')
+
+    if spath:
+        plt.savefig(spath)
+    plt.show()
+
+def read_yolo_weight_data(eventdir):
+    filepaths = []
+    for filename in os.listdir(eventdir):
+        file, ext = os.path.splitext(filename)
+        if ext =='.data':
+            filepath = os.path.join(eventdir, filename)
+            filepaths.append(filepath)
+
+    # An event folder is expected to contain exactly five .data files
+    if len(filepaths) != 5:
+        return
+
+    tracker_boxes = []
+    WeightDict, SensorDict, ProcessTimeDict = {}, {}, {}
+    for filepath in filepaths:
+        filename = os.path.basename(filepath)
+
+        if filename.find('_track.data')>0:
+            CamerType = filename.split('_')[0]
+            trackerboxes, trackerfeats = extract_data_1(filepath)
+            tracker_boxes.append((CamerType, trackerboxes, trackerfeats))
+
+        if filename.find('process.data')==0:
+            WeightDict, SensorDict, ProcessTimeDict = read_weight_timeConsuming(filepath)
+
+    '''==================== Weight-signal preprocessing ===================='''
+    weights = [(float(t), w) for t, w in WeightDict.items()]
+    weights = np.array(weights)
+
+    return tracker_boxes, weights
+
+
+def main():
+    eventdir = r"\\192.168.1.28\share\测试_202406\0819\images\20240817-192549-6940120c-634c-481b-97a6-65042729f86b_null"
+
+    tracker_boxes, weights = read_yolo_weight_data(eventdir)
+
+    '''==================== Image-based motion analysis ===================='''
+    win_width = 12
+    periods = []
+    for ctype, tboxes, _ in tracker_boxes:
+        period = devide_motion_state(tboxes, win_width)
+
+        periods.append((ctype, period))
+    print('done!')
+
+    '''=============== Fuse the weight and image information ==============='''
+    state_measure(periods, weights)
+
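+# A minimal driver sketch (hypothetical event folder; read_yolo_weight_data
+# expects five ".data" files per event, among them the per-camera
+# "*_track.data" files and a "process.data" file with the weight dictionaries):
+#     tracker_boxes, weights = read_yolo_weight_data(r"\\server\share\event_dir")
+#     periods = [(c, devide_motion_state(tb, 12)) for c, tb, _ in tracker_boxes]
+#     state_measure(periods, weights)
+
+if __name__ == "__main__":
+    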
main() + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/imgs_inference.py b/imgs_inference.py new file mode 100644 index 0000000..ed314f3 --- /dev/null +++ b/imgs_inference.py @@ -0,0 +1,435 @@ +# -*- coding: utf-8 -*- +""" +Created on Fri Oct 18 13:09:42 2024 + +@author: ym +""" +import argparse +import os +import sys +import torch +from pathlib import Path +import numpy as np + +# from matplotlib.pylab import mpl +# mpl.use('Qt5Agg') +import matplotlib.pyplot as plt + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend + +from utils.augmentations import letterbox + + +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) +from utils.torch_utils import select_device, smart_inference_mode + +'''集成跟踪模块,输出跟踪结果文件 .npy''' +# from ultralytics.engine.results import Boxes # Results +# from ultralytics.utils import IterableSimpleNamespace, yaml_load +from tracking.utils.plotting import Annotator, colors +from tracking.utils import Boxes, IterableSimpleNamespace, yaml_load, boxes_add_fid +from tracking.trackers import BOTSORT, BYTETracker +from tracking.utils.showtrack import drawtracks +from hands.hand_inference import hand_pose + +from tracking.trackers.reid.reid_interface import ReIDInterface +from tracking.trackers.reid.config import config as ReIDConfig + +ReIDEncoder = ReIDInterface(ReIDConfig) +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. 
`python detect.py --source image.jpg/vid.mp4` + def __init__(self, files, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + + # image rorate + (h, w) = im0.shape[:2] + center = (w // 2, h // 2) + angle = 90 + scale = 1.0 + M = cv2.getRotationMatrix2D(center, angle, scale) + im0 = cv2.warpAffine(im0, M, (h, w)) + + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.nf # number of files + +def inference_image(image, detections): + H, W, _ = np.shape(image) + imgs = [] + batch_patches = [] + patches = [] + for d in range(np.size(detections, 0)): + tlbr = detections[d, :4].astype(np.int_) + tlbr[0] = max(0, tlbr[0]) + tlbr[1] = max(0, tlbr[1]) + tlbr[2] = min(W - 1, tlbr[2]) + tlbr[3] = min(H - 1, tlbr[3]) + img1 = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :] + + img = img1[:, :, ::-1].copy() # the model expects RGB inputs + patch = ReIDEncoder.transform(img) + + imgs.append(img1) + # patch = patch.to(device=self.device).half() + if str(ReIDEncoder.device) != "cpu": + patch = patch.to(device=ReIDEncoder.device).half() + else: + patch = 
patch.to(device=ReIDEncoder.device) + + patches.append(patch) + if (d + 1) % ReIDEncoder.batch_size == 0: + patches = torch.stack(patches, dim=0) + batch_patches.append(patches) + patches = [] + + if len(patches): + patches = torch.stack(patches, dim=0) + batch_patches.append(patches) + + features = np.zeros((0, ReIDEncoder.embedding_size)) + for patches in batch_patches: + pred = ReIDEncoder.model(patches) + pred[torch.isinf(pred)] = 1.0 + feat = pred.cpu().data.numpy() + features = np.vstack((features, feat)) + + return imgs, features + + +def init_trackers(tracker_yaml = None, bs=1): + """ + Initialize trackers for object tracking during prediction. + """ + # tracker_yaml = r"./tracking/trackers/cfg/botsort.yaml" + + TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT} + + cfg = IterableSimpleNamespace(**yaml_load(tracker_yaml)) + trackers = [] + for _ in range(bs): + tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30) + trackers.append(tracker) + + return trackers + + +def run( + weights=ROOT / 'yolov5s.pt', # model path or triton URL + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) + + project=ROOT / 'runs/detect', # save results to project/name + name='exp', # save results to project/name + + tracker_yaml = "./tracking/trackers/cfg/botsort.yaml", + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_csv=False, # save results in CSV format + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + save_img = True, + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidencesL + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + ): + assert isinstance(source,list), "source must be a list" + + fulldir, imgname = os.path.split(source[0]) + imgbase, ext = os.path.splitext(imgname) + + + # 事件名、相机类型 + EventName = fulldir.split('\\')[-2] + "_" + str(Path(fulldir).stem) + CamerType = imgbase.split('_')[1] + + save_dir = Path(project) / Path(EventName) + if save_dir.exists(): + print(Path(fulldir).stem) + + # save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + # save_dir.mkdir(parents=True, exist_ok=True) # make dir + else: + save_dir.mkdir(parents=True, exist_ok=True) + + save_path_video = os.path.join(str(save_dir), f"{EventName}_{CamerType}") + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + bs = 1 # batch_size + + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + 
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup + seen, dt = 0, (Profile(), Profile(), Profile()) + + tracker = init_trackers(tracker_yaml, bs)[0] + track_boxes = np.empty((0, 10), dtype = np.float32) + k = 0 + for path, im, im0s, vid_cap, s in dataset: + # k +=1 + # if k==60: + # break + + timeStamp = Path(path).stem.split('_')[2] + + with dt[0]: + im = torch.from_numpy(im).to(model.device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + im0 = im0s.copy() + + s += '%gx%g ' % im.shape[2:] # print string + annotator = Annotator(im0s, line_width=line_thickness, example=str(names)) + + nd = len(det) + if nd: + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() + + '''tracks: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index, timestamp] + 0 1 2 3 4 5 6 7 8 + 这里,frame_index 也可以用视频的 帧ID 代替, box_index 保持不变 + ''' + + det_tracking = Boxes(det, im0.shape).cpu().numpy() + tracks = tracker.update(det_tracking, im0) + if len(tracks) == 0: + continue + tracks[:, 7] = dataset.count + + + stamp = np.ones((len(tracks), 1)) * int(timeStamp) + tracks = np.concatenate((tracks, stamp), axis=1) + + '''================== 1. 存储 dets/subimgs/features Dict =============''' + # imgs, features = inference_image(im0, tracks) + track_boxes = np.concatenate([track_boxes, tracks], axis=0) + + for *xyxy, id, conf, cls, fid, bid, t in reversed(tracks): + name = ('' if id==-1 else f'id:{int(id)} ') + names[int(cls)] + label = None if hide_labels else (name if hide_conf else f'{name} {conf:.2f}') + + if id >=0 and cls==0: + color = colors(int(cls), True) + elif id >=0 and cls!=0: + color = colors(int(id), True) + else: + color = colors(19, True) # 19为调色板的最后一个元素 + + annotator.box_label(xyxy, label, color=color) + + # Save results (image and video with tracking) + im0 = annotator.result() + p = Path(path) # to Path + if save_img: + imgpath = str(save_dir/p.stem) + f"_{dataset.count}.png" + cv2.imwrite(Path(imgpath), im0) + if vid_path[i] != save_path_video: # new video + vid_path[i] = save_path_video + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + fps, w, h = 30, im0.shape[1], im0.shape[0] + vpath = str(Path(save_path_video).with_suffix('.mp4')) + vid_writer[i] = cv2.VideoWriter(vpath, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + + vid_writer[i].write(im0) + + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + for v in vid_writer: + v.release() + + if track_boxes.size == 0: + return CamerType, [] + + save_path_np = os.path.join(str(fulldir), f"{EventName}_{CamerType}") + np.save(save_path_np, track_boxes) + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + 
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + + return CamerType, track_boxes + + +def parse_opt(): + modelpath = ROOT / 'ckpts/best_cls10_0906.pt' # 'ckpts/best_15000_0908.pt', 'ckpts/yolov5s.pt', 'ckpts/best_20000_cls30.pt, best_yolov5m_250000' + + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=modelpath, help='model path or triton URL') # 'yolov5s.pt', best_15000_0908.pt + parser.add_argument('--source', type=str, default='', help='file/dir/URL/glob/screen/0(webcam)') # images, videos + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-csv', action='store_true', help='save results in CSV format') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + +def run_yolo(eventdir, savedir): + + opt = parse_opt() + optdict = vars(opt) + optdict["project"] = savedir + optdict["source"] = eventdir + run(**vars(opt)) + + + + + diff --git a/move_detect.py b/move_detect.py new file mode 100644 index 0000000..4701fdb --- /dev/null +++ b/move_detect.py @@ -0,0 
+1,243 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Oct 14 10:01:24 2024
+
+@author: ym
+"""
+import numpy as np
+import cv2
+from scipy.spatial.distance import cdist
+
+
+class TrackFrag:
+
+    def __init__(self, boxes, imgshape=(1280, 1024)):
+        self.boxes = boxes
+        self.cls = int(boxes[0, 6])
+        self.tid = int(boxes[0, 4])
+        self.imgshape = imgshape
+
+        '''Track duration, expressed in frame IDs (timestamps would also work)'''
+        self.during = (np.min(boxes[:, 7]), np.max(boxes[:, 7]))
+
+        self.groups = [set(np.unique(boxes[:, 7].astype(int)))]
+
+        '''Coordinates of 5 keypoints: center, top-left, top-right, bottom-left, bottom-right'''
+        self.isCornpoint = self.is_cornpoint(10)
+        self.compute_cornpoints()
+
+    def is_cornpoint(self, edge=10):
+        # True if the track ever touches an image border (within `edge` pixels);
+        # imgshape is (width, height) for the rotated frames
+        isleft = min(self.boxes[:, 0]) < edge
+        istop = min(self.boxes[:, 1]) < edge
+        isright = max(self.boxes[:, 2]) > self.imgshape[0] - edge
+        isbottom = max(self.boxes[:, 3]) > self.imgshape[1] - edge
+
+        isCornpoint = isbottom or istop or isleft or isright
+        return isCornpoint
+
+    def compute_cornpoints(self):
+        '''
+        cornpoints holds 10 values: the (x, y) coordinates of the five keypoints
+        (center, top_left, top_right, bottom_left, bottom_right)
+        '''
+        boxes = self.boxes
+        cornpoints = np.zeros((len(boxes), 10))
+
+        cornpoints[:,0] = (boxes[:, 0] + boxes[:, 2]) / 2
+        cornpoints[:,1] = (boxes[:, 1] + boxes[:, 3]) / 2
+        cornpoints[:,2], cornpoints[:,3] = boxes[:, 0], boxes[:, 1]
+        cornpoints[:,4], cornpoints[:,5] = boxes[:, 2], boxes[:, 1]
+        cornpoints[:,6], cornpoints[:,7] = boxes[:, 0], boxes[:, 3]
+        cornpoints[:,8], cornpoints[:,9] = boxes[:, 2], boxes[:, 3]
+
+        # For each keypoint, the diameter of its trajectory; keep the smallest
+        trajdist = []
+        for k in range(5):
+            X = cornpoints[:, 2*k:2*(k+1)]
+            trajdist.append(np.max(cdist(X, X)))
+
+        idx = trajdist.index(min(trajdist))
+
+        self.trajdist_min = trajdist[idx]
+        self.cornpoints = cornpoints
+
+    def update_groups(self, THRESH=18):
+        '''
+        Re-clusters the box centers and overwrites self.groups. Region growing:
+        a point joins a group if it lies within THRESH pixels of any point
+        already in the group.
+        '''
+        boxes = self.boxes
+        nbox = len(boxes)
+
+        X = np.zeros((len(boxes), 2))
+        X[:,0] = (boxes[:, 0] + boxes[:, 2]) / 2
+        X[:,1] = (boxes[:, 1] + boxes[:, 3]) / 2
+
+        dist2 = cdist(X, X)
+
+        marked, groups = set(), []
+        for k in range(nbox):
+            if k in marked:
+                continue
+            group = set()
+
+            dt = dist2[k, :]
+            idx = np.where(dt < THRESH)[0]
+
+            if len(idx) == 1:
+                groups.append({k})
+                marked.add(k)
+                continue
+            '''Initial neighbour set, with the current point removed'''
+            seeds = set(idx)
+            seeds.remove(k)
+
+            group.add(k)
+            marked.add(k)
+            while len(seeds) !=0:
+                pt = seeds.pop()
+                dt = dist2[pt, :]
+
+                seed = set(np.where(dt < THRESH)[0])
+                seed.remove(pt)
+
+                seed.difference_update(marked)
+                seeds.update(seed)
+
+                group.add(pt)
+                marked.add(pt)
+
+            groups.append(group)
+
+        self.groups = groups
+
+    def jump_boxes(self):
+        gpboxes = []
+        for group in self.groups:
+            box = self.boxes[list(group), :]
+            gpboxes.append(box)
+
+        return gpboxes
+
+    def is_moving(self):
+        # Three or more spatial clusters means the box kept relocating
+        return len(self.groups) >= 3
+
+    def is_static(self):
+        box1 = self.boxes[0, :4]
+        box2 = self.boxes[-1, :4]
+
+        '''Distances between the four corner points of the first and last boxes'''
+        ptd = box2 - box1
+        ptd1 = np.linalg.norm((ptd[0], ptd[1]))
+        ptd2 = np.linalg.norm((ptd[2], ptd[1]))
+        ptd3 = np.linalg.norm((ptd[0], ptd[3]))
+        ptd4 = np.linalg.norm((ptd[2], ptd[3]))
+        condt1 = ptd1<50 and ptd2<50 and ptd3<50 and ptd4<50
+
+        # For tracks that never touch the image border, a small overall
+        # trajectory extent also counts as static
+        condt2 = (not self.isCornpoint) and self.trajdist_min < 120
+
+        return condt1 or condt2
+
+
+class MoveDetect:
+    def __init__(self, bboxes, imgshape=(1280, 1024)):
+        self.bboxes = bboxes
+        self.shape = imgshape
+
+        self.temp = np.zeros(imgshape, np.uint8)
+
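+        # Usage sketch (hypothetical input; `bboxes` as produced by the tracker,
+        # columns [x1, y1, x2, y2, track_id, score, cls, frame_index, ...]):
+        #     md = MoveDetect(bboxes)
+        #     md.classify()
+        #     moving = md.track_motion   # tracks whose centers form >= 3 clusters
+        self.trackIDs = 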
np.unique(bboxes[:, 4].astype(int)) + # self.frameID = np.unique(bboxes[:, 7].astype(int)) + # self.fnum = len(self.frameID) + + self.lboxes = self.array2list() + + self.tracks = [TrackFrag(b) for b in self.lboxes] + + def classify(self): + + tracks = self.tracks + + '''减去静止轨迹''' + tracks_static = [t for t in tracks if t.is_static()] + tracks = self.sub_tracks(tracks, tracks_static) + + '''更新轨迹点聚类''' + for track in tracks: + track.update_groups(18) + + + self.track_motion = [t for t in tracks if len(t.groups)>=3] + + + def draw(self): + pass + + + + def array2list(self): + ''' + 将 bboxes 变换为 track 列表 + bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index] + Return: + lboxes:列表,列表中元素具有同一 track_id,x1y1x2y2 格式 + [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index] + ''' + track_ids = self.bboxes[:, 4].astype(int) + lboxes = [] + for t_id in self.trackIDs: + # print(f"The ID is: {t_id}") + idx = np.where(track_ids == t_id)[0] + box = self.bboxes[idx, :] + + lboxes.append(box) + + return lboxes + + @staticmethod + def sub_tracks(tlista, tlistb): + track_ids_b = {t.tid for t in tlistb} + return [t for t in tlista if t.tid not in track_ids_b] + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/pipeline.py b/pipeline.py index 1d1c0df..e013fd6 100644 --- a/pipeline.py +++ b/pipeline.py @@ -7,24 +7,20 @@ Created on Sun Sep 29 08:59:21 2024 import os import cv2 import pickle +import numpy as np from pathlib import Path from track_reid import parse_opt, yolo_resnet_tracker from tracking.dotrack.dotracks_back import doBackTracks from tracking.dotrack.dotracks_front import doFrontTracks +from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories +from utils.getsource import get_image_pairs, get_video_pairs -IMGFORMATS = '.bmp', '.jpeg', '.jpg', 'png', 'tif', 'tiff', 'webp', 'pfm' -VIDFORMATS = '.avi', '.gif', '.m4v', '.mkv', '.mov', '.mp4', '.ts', '.wmv' - -std_feature_path = r"\\192.168.1.28\share\测试_202406\contrast\std_features_2192_ft32vsft16" - - -opt = parse_opt() -optdict = vars(opt) +std_feature_path = r"\\192.168.1.28\share\测试_202406\contrast\std_features_2192_ft32vsft16" def get_interbcd_inputenents(): bcdpath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192" @@ -44,108 +40,118 @@ def get_interbcd_inputenents(): return input_enents + +def pipeline(eventpath, stdfeat_path=None, SourceType = "image"): + ''' + inputs: + eventpath: 事件文件夹 + stdfeat_path: 标准特征文件地址 + outputs: - - -def get_video_pairs(vpath): - vdieopath = [] - for filename in os.listdir(vpath): - file, ext = os.path.splitext(filename) - if ext in VIDFORMATS: - vdieopath.append(os.path.join(vpath, filename)) - return vdieopath - -def pipeline(eventpath, stdfeat_path): + ''' + SourceType = "image" # image + # eventpath = r"\\192.168.1.28\share\测试_202406\0918\images1\20240918-110822-1bc3902e-5a8e-4e23-8eca-fb3f02738551_6938314601726" savepath = r"D:\contrast\detect" + opt = parse_opt() + optdict = vars(opt) optdict["project"] = savepath eventname = os.path.basename(eventpath) - barcode = eventname.split('_')[-1] + # barcode = eventname.split('_')[-1] + + if SourceType == "video": + vpaths = get_video_pairs(eventpath) + elif SourceType == "image": + vpaths = get_image_pairs(eventpath) + + - vpaths = get_video_pairs(eventpath) event_tracks = [] for vpath in vpaths: '''事件结果文件夹''' save_dir_event = Path(savepath) / Path(eventname) - save_dir_img = save_dir_event / Path(str(Path(vpath).stem)) - if not save_dir_img.exists(): - 
save_dir_img.mkdir(parents=True, exist_ok=True) + if isinstance(vpath, list): + save_dir_video = save_dir_event / Path("images") + else: + save_dir_video = save_dir_event / Path(str(Path(vpath).stem)) + + + if not save_dir_video.exists(): + save_dir_video.mkdir(parents=True, exist_ok=True) '''Yolo + Resnet + Tracker''' optdict["source"] = vpath - optdict["save_dir"] = save_dir_img - optdict["nosave"] = False + optdict["save_dir"] = save_dir_video + optdict["is_save_img"] = True + optdict["is_save_video"] = True tracksdict = yolo_resnet_tracker(**optdict) bboxes = tracksdict['TrackBoxes'] - bname = os.path.basename(vpath) + bname = os.path.basename(vpath[0]) if isinstance(vpath, list) else os.path.basename(vpath) if bname.split('_')[0] == "0" or bname.find('back')>=0: - vts = doFrontTracks(bboxes, tracksdict) + vts = doBackTracks(bboxes, tracksdict) vts.classify() event_tracks.append(("back", vts)) if bname.split('_')[0] == "1" or bname.find('front')>=0: - vts = doBackTracks(bboxes, tracksdict) + vts = doFrontTracks(bboxes, tracksdict) vts.classify() event_tracks.append(("front", vts)) + '''轨迹显示模块''' + illus = [None, None] for CamerType, vts in event_tracks: - if CamerType == 'back': - pass if CamerType == 'front': - pass + edgeline = cv2.imread("./tracking/shopcart/cart_tempt/board_ftmp_line.png") + img_tracking = draw_all_trajectories(vts, edgeline, save_dir_event, CamerType, draw5p=True) + illus[0] = img_tracking + + + plt = plot_frameID_y2(vts) + plt.savefig(os.path.join(save_dir_event, "front_y2.png")) + + if CamerType == 'back': + edgeline = cv2.imread("./tracking/shopcart/cart_tempt/edgeline.png") + img_tracking = draw_all_trajectories(vts, edgeline, save_dir_event, CamerType, draw5p=True) + illus[1] = img_tracking + + illus = [im for im in illus if im is not None] + if len(illus): + img_cat = np.concatenate(illus, axis = 1) + if len(illus)==2: + H, W = img_cat.shape[:2] + cv2.line(img_cat, (int(W/2), 0), (int(W/2), int(H)), (128, 128, 255), 3) + + trajpath = os.path.join(save_dir_event, "traj.png") + cv2.imwrite(trajpath, img_cat) + + '''前后摄轨迹选择''' - - if stdfeat_path is not None: with open(stdfeat_path, 'rb') as f: featDict = pickle.load(f) - - - - - - - - - - - - - - -def main(): +def main_loop(): bcdpath = r"\\192.168.1.28\share\测试_202406\contrast\std_barcodes_2192" eventpath = r"\\192.168.1.28\share\测试_202406\0918\images1" - - + SourceType = "image" # video, image + barcodes = [] input_enents = [] output_events = [] - - - # input_enents = get_interbcd_inputenents() - # k = 0 - # for event in input_enents: - # pipeline(event) - - # k += 1 - # if k ==1: - # break - - + + '''1. 获得barcode标准特征集列表''' for featname in os.listdir(bcdpath): barcode, ext = os.path.splitext(featname) @@ -153,31 +159,30 @@ def main(): continue barcodes.append(barcode) - - - - - - - + '''2. 
Build (input event, standard feature) pairs'''
+    for filename in os.listdir(eventpath):
+        '''The barcode is the last "_"-separated field of the event folder name'''
         bcd = filename.split('_')[-1]
         event_path = os.path.join(eventpath, filename)
-
         stdfeat_path = None
         if bcd in barcodes:
-            stdfeat_path = os.path.join(bcdpath, f"{bcd}.pickle")
-            
+            stdfeat_path = os.path.join(bcdpath, f"{bcd}.pickle")
         input_enents.append((event_path, stdfeat_path))
-        
-        
-        
+    
     for eventpath, stdfeat_path in input_enents:
-        pipeline(eventpath, stdfeat_path)
+        pipeline(eventpath, stdfeat_path, SourceType)
 
-    
+def main():
+    eventpath = r"D:\datasets\ym\exhibition\175836"
+    SourceType = 'image'
+    stdfeat_path = None
+
+    pipeline(eventpath, stdfeat_path, SourceType)
+
 
 
@@ -187,4 +192,10 @@ def main():
 
 
 if __name__ == "__main__":
-    main()
\ No newline at end of file
+    main()
+
+
\ No newline at end of file
diff --git a/pipeline_extract_subimg.py b/pipeline_extract_subimg.py
index 1e3a960..8a3cb9c 100644
--- a/pipeline_extract_subimg.py
+++ b/pipeline_extract_subimg.py
@@ -2,6 +2,8 @@
 """
 Created on Sun Sep 29 08:59:21 2024
 
+    For video captured on site, run the algorithm pipeline to extract the sub-images
+    inside each motion trajectory, replacing manual image screening and annotation
+
 @author: ym
 """
 import os
diff --git a/time_devide.py b/time_devide.py
new file mode 100644
index 0000000..e843a7e
--- /dev/null
+++ b/time_devide.py
@@ -0,0 +1,451 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Oct 16 17:37:07 2024
+
+@author: ym
+"""
+
+import os
+from pathlib import Path
+import glob
+import numpy as np
+import copy
+
+import matplotlib.pyplot as plt
+from imgs_inference import run_yolo
+
+from event_time_specify import devide_motion_state
+from tracking.utils.read_data import read_seneor
+
+
+def filesort(p):
+    '''
+    Split the .jpg files of an event folder per camera and sort them by
+    timestamp; file names must follow the pattern
+    <prefix>_<cameraId>_<timestamp>.jpg
+    '''
+    files = []
+    files.extend(sorted(glob.glob(os.path.join(p, '*.jpg'))))
+
+    tamps_0, tamps_1 = [], []
+    files_0, files_1 = [], []
+    for file in files:
+        basename = os.path.basename(file)
+
+        f, ext = os.path.splitext(basename)
+        _, camer, tamp = f.split('_')
+
+        if camer == '0':
+            tamps_0.append(int(tamp))
+            files_0.append(file)
+
+        if camer == '1':
+            tamps_1.append(int(tamp))
+            files_1.append(file)
+
+    idx0 = sorted(range(len(tamps_0)), key=lambda k: tamps_0[k])
+    files0 = [files_0[i] for i in idx0]
+
+    idx1 = sorted(range(len(tamps_1)), key=lambda k: tamps_1[k])
+    files1 = [files_1[i] for i in idx1]
+
+    return (files0, files1)
+
+
+def rename(filePath, tmin):
+    """
+    Input: folder path.
+    Appends each .png file's timestamp offset relative to tmin as an extra
+    trailing field of the file name.
+    """
+    suffix = '.png'   # only files with this extension are renamed
+    for file in os.listdir(filePath):
+        if file.endswith(suffix):
+            name = file.split('.')[0]
+
+            tamp = int(name.split('_')[2]) - tmin
+            ext = file.split('.')[1]
+
+            newname = name + f'_{int(tamp)}.' + ext
+
+            os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
+
+
+def rerename(filePath=None):
+    """
+    Input: folder path.
+    Strips the extra trailing field again, restoring the original
+    five-field .png file names.
+    """
+    suffix = '.png'   # only files with this extension are renamed
+    for file in os.listdir(filePath):
+        if file.endswith(suffix):
+            name = file.split('.')[0]
+            names = name.split('_')
+            if len(names)>=6:
+                newname = "_".join(names[0:5])+'.png'
+                os.rename(os.path.join(filePath, file), os.path.join(filePath, newname))
+
+
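+# The fusion below OR-combines the two camera state streams and then
+# AND-combines the result with the weight state. A toy sketch of the same idea
+# on made-up, already-aligned 1D state vectors (illustration only, not the
+# author's code):
+#     import numpy as np
+#     cv   = np.array([0, 1, 1, 0])   # camera: cart moving?
+#     wgt  = np.array([1, 1, 0, 0])   # weight: fluctuating?
+#     cart = cv & wgt                 # -> array([0, 1, 0, 0])
+def state_measure(periods, 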
weights, spath=None):
+    '''
+    Data-source type codes:
+        back camera: 0, front camera: 1, fused CV: 2, weight: 9
+    frstate_0/1 columns:
+        frame ID | timestamp | per-camera state 1 | per-camera state 2 | active-track flags
+            0          1              2                    3                 4:end
+
+    time_stream = np.concatenate((tstream, weights)); its columns are:
+        row index | source type | timestamp | state 1 / weight value |
+        state 2 / weight (t0, t1) | CV state / weight (t0', t1') |
+        fused source type | fused state
+            0   1   2   3   4   5   6   7
+
+    per-camera state 1: motion interval derived from trajectory start/end points
+    per-camera state 2: motion interval derived from the sliding window
+    weight (t0, t1): the exact interval of weight fluctuation, taken from the
+        fluctuation start/end points rather than from the settling time alone
+    weight (t0', t1'): the fluctuation window widened for put-in / take-out
+        events, so that it covers the whole shopping action
+    Strategy:
+        OR the front- and back-camera states,
+        then AND the fused CV state with the weight state.
+    '''
+
+    # BackType = 0                 # back-camera source type
+    # FrontType = 1                # front-camera source type
+    CameraType = 2                 # fused CV source type
+    WeightType = 9                 # weight source type
+    WeightStableThresh = 7.5       # g; max fluctuation of a stable weight reading
+    WeightWinWidth = 10            # number of weight samples; with the sampling interval this gives a 500 ms settling window
+    CameraTimeInterval = 100       # ms; max gap when OR-ing the two camera streams
+    InputFrontNum = 10             # weight samples to extend backwards for a put-in event
+    InputBackNum = 0               # weight samples to extend forwards for a put-in event
+    OutputFrontNum = 2             # weight samples to extend backwards for a take-out event
+    OutputBackNum = 10             # weight samples to extend forwards for a take-out event
+    CompTimeInterval = 150         # ms; max gap when AND-ing the CV and weight states
+
+
+    '''============ 1.1 Assemble and time-sort the weight stream ============'''
+    nw = len(weights)
+    widx = np.array([k for k in range(0, nw)])[:, None]
+    wtype = WeightType * np.ones((nw, 1))
+    wstate = np.zeros((nw, 4))
+    weights = np.concatenate((widx, wtype, weights, wstate), axis=1).astype(np.int64)
+    weights[:, 6] = WeightType
+
+    weights = weights[np.argsort(weights[:, 2]), :]
+
+    '''============ 1.2 Segment states from the weight stream ============'''
+    w_max = np.max(weights[:, 3])
+    for i2 in range(0, nw):
+        i1 = max(i2 - WeightWinWidth, 0)
+        wvalue = weights[i1:i2+1, 3]
+        wi2 = weights[i2, 3]
+        wmin = np.min(wvalue)
+        wmax = np.max(wvalue)
+
+        '''Flag samples inside a fluctuation interval, and remember the index
+        and value of the latest stable weight reading'''
+        if wmax - wmin > WeightStableThresh:
+            weights[i2, 4] = w_max
+        elif i2==0:
+            i0=0
+            wi0 = weights[i0, 3]
+        elif i2>0 and weights[i2-1, 4]==0:
+            i0 = copy.deepcopy(i2)
+            wi0 = weights[i0, 3]
+
+        if i2>0 and weights[i2-1, 4]!=0 and weights[i2, 4]==0:
+            # The difference between the current stable weight and the previous
+            # stable weight decides between put-in and take-out
+            if wi2-wi0 > WeightStableThresh:
+                i00 = max(i0 - InputFrontNum, 0)
+                i22 = min(i2 + InputBackNum, nw)
+            elif wi2-wi0 < -1*WeightStableThresh:
+                i00 = max(i0 - OutputFrontNum, 0)
+                i22 = min(i2 + OutputBackNum, nw)
+            else:
+                i00 = max(i0 - max(InputFrontNum, OutputFrontNum), 0)
+                i22 = min(i2 + max(InputBackNum, OutputBackNum), nw)
+
+            weights[i00:i22, 5] = w_max + 100
+
+
+    '''============ 2.1 Assemble and time-sort the CV streams ============'''
+    BackType, frstate_0 = periods[0]
+    FrontType, frstate_1 = periods[1]
+
+    n0, n1 = len(frstate_0), len(frstate_1)
+    idx0 = np.array([i for i in range(0, n0)], dtype=np.int64)[:, None]
+    idx1 = np.array([i for i in range(0, n1)], dtype=np.int64)[:, None]
+    ctype0 = BackType * np.ones((n0, 1), dtype=np.int64)
+    ctype1 = FrontType * np.ones((n1, 1), dtype=np.int64)
+    tstamp0 = frstate_0[:,1][:, None]
+    tstamp1 = frstate_1[:,1][:, None]
+    state0 = frstate_0[:,2][:, None]
+    state00 = frstate_0[:,3][:, None]
+
+    state1 = frstate_1[:,2][:, None]
+    state11 = frstate_1[:,3][:, None]
+
+    '''columns: row index, camera type, timestamp, state 1, state 2,
+       fused CV state, fused source type, fused state (0..7)'''
+    tstream0 = np.concatenate((idx0, ctype0, tstamp0, state0, state00), axis=1)
+    tstream1 = np.concatenate((idx1, ctype1, tstamp1, state1, state11), axis=1)
+    tstream = np.concatenate((tstream0, tstream1), axis=0)
+    tstream = np.concatenate((tstream, np.zeros((len(tstream), 3), dtype=np.int64)), axis=1)
+    tstream[:, 6] = CameraType
+    tstream = tstream[np.argsort(tstream[:, 2]), :]
+
+    '''=============== 2.2 Fused CV state from per-camera trajectory endpoints 
============''' + for i in range(0, len(tstream)): + idx, ctype, stamp, state = tstream[i, :4] + if i==0: + tstream[i, 5] = state + if i>0: + j = i-1 + idx0, ctype0, stamp0, state0 = tstream[j, :4] + while stamp-stamp0 < CameraTimeInterval and ctype == ctype0 and j>0: + j -= 1 + idx0, ctype0, stamp0, state0 = tstream[j, :4] + + '''两摄像头状态的或运算. 由于前后摄图像不同时,如何构造或运算,关键在于选择不同摄像头的对齐点 + i时刻摄像头(ctype)状态state,另一摄像头(ctype0 != ctype)距 i 最近最近时刻 j 的状态state0 + ''' + if ctype != ctype0 and state0==1: + tstream[i, 5] = state0 + else: + tstream[i, 5] = state + + + '''================ 3.1 CV、Wweight 数据综合并排序 ======================''' + time_stream = np.concatenate((tstream, weights), axis=0, dtype=np.int64) + time_stream = time_stream[np.argsort(time_stream[:, 2]), :] + tmin = np.min(time_stream[:, 2]) + time_stream[:, 2] = time_stream[:, 2] - tmin + + '''============== 3.2 基于 CV 和 Weight 确定 Cart 的综合状态 ============''' + for i in range(0, len(time_stream)): + idx, _, stamp, value, _, state, ctype = time_stream[i, :7] + state = min(state, 1) + + if i==0: + time_stream[i, 7] = state + + if i>0: + j = i-1 + idx0, _, stamp0, value0, _, state0, ctype0 = time_stream[j, :7] + while stamp-stamp0 < CompTimeInterval and ctype == ctype0 and j>0: + j -= 1 + idx0, _, stamp0, value0, _, state0, ctype0 = time_stream[j, :7] + + '''CV与Weight的与运算. 由于CV与Weight不同时,如何构造与运算,关键在于选择不同数据源的对齐点 + i时数据类型(ctype)状态state,另一数据类型(ctype0 != ctype)距 i 最近最近时刻 j 的状态state0 + ''' + if ctype != ctype0 and state !=0 and state0 !=0: + time_stream[i, 7] = 1 + + MotionSlice = [] + + motion_slice = [] + + t0 = time_stream[0, 7] + for i in range(1, len(time_stream)): + f0 = time_stream[i-1, 7] + f1 = time_stream[i, 7] + if f0==0 and f1==1: + t0 = time_stream[i, 2] + elif f0==1 and f1==0: + t1 = time_stream[i, 2] + if t1-t0>100: #ms + MotionSlice.append((t0+tmin, t1+tmin)) + motion_slice.append((t0, t1)) + else: + print(f"T0: {t0}, T1: {t1}") + + + '''========================== 4 结果显示 ================================''' + frstate_0[:, 1] = frstate_0[:, 1]-tmin + frstate_1[:, 1] = frstate_1[:, 1]-tmin + tstream[:, 2] = tstream[:, 2]-tmin + + fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, 1) + during = np.max(time_stream[:, 2]) + + ax1.plot(weights[:, 2]-tmin, weights[:, 3], 'bo-', linewidth=1, markersize=4) + ax1.plot(weights[:, 2]-tmin, weights[:, 4], 'mx-', linewidth=1, markersize=4) + ax1.plot(weights[:, 2]-tmin, weights[:, 5], 'gx-', linewidth=1, markersize=4) + ax1.set_xlim([0, during]) + ax1.set_title('Weight (gram)') + + ax2.plot(frstate_0[:, 1], frstate_0[:, 3], 'rx-', linewidth=1, markersize=8) + ax2.plot(frstate_0[:, 1], frstate_0[:, 2], 'bo-', linewidth=1, markersize=4) + ax2.set_xlim([0, during]) + ax2.set_title('Back Camera') + + ax3.plot(frstate_1[:, 1], frstate_1[:, 3], 'rx-', linewidth=1, markersize=8) + ax3.plot(frstate_1[:, 1], frstate_1[:, 2], 'bo-', linewidth=1, markersize=4) + ax3.set_xlim([0, during]) + ax3.set_title('Front Camera') + + ax4.plot(tstream[:, 2], tstream[:, 5], 'bx-', linewidth=1, markersize=4) + ax4.set_xlim([0, during]) + ax4.set_title('CV State') + + ax5.plot(time_stream[:, 2], time_stream[:, 7], 'gx-', linewidth=1, markersize=4) + ax5.set_xlim([0, during]) + ax5.set_title('Cart State') + + plt.show() + if spath: + plt.savefig(spath) + + return tmin, MotionSlice + +def splitevent(imgpath, MotionSlice): + suffix = '.png' + + imgfiles = [f for f in os.listdir(imgpath) if f.endswith(suffix)] + timestamp = np.array([int(f.split('_')[2]) for f in imgfiles]) + + indexes = [] + k = 0 + for t0, t1 in MotionSlice: + idx0 = 
set(np.where(timestamp >= t0)[0]) + idx1 = set(np.where(timestamp <= t1)[0]) + idx2 = list(idx0.intersection(idx1)) + files = [imgfiles[i] for i in idx2] + + for filename in files: + file, ext = os.path.splitext(filename) + newname = file + f'_{k}.png' + os.rename(os.path.join(imgpath, filename), os.path.join(imgpath, newname)) + k += 1 + + print("Done!") + + + +def runyolo(): + eventdirs = r"\\192.168.1.28\share\realtime\eventdata" + savedir = r"\\192.168.1.28\share\realtime\result" + + for edir in os.listdir(eventdirs): + source = os.path.join(eventdirs, edir) + files = filesort(source) + for flist in files: + run_yolo(flist, savedir) + +def run_tracking(trackboxes, MotionSlice): + pass + + + + + +def show_seri(): + datapath = r"\\192.168.1.28\share\realtime\eventdata\1728978106733" + savedir = r"\\192.168.1.28\share\realtime\result" + + + imgdir = datapath.split('\\')[-2] + "_" + datapath.split('\\')[-1] + imgpath = os.path.join(savedir, imgdir) + + eventname = Path(datapath).stem + + datafiles = sorted(glob.glob(os.path.join(datapath, '*.npy'))) + + periods, trackboxes = [], [] + win_width = 12 + for npypath in datafiles: + CameraType = Path(npypath).stem.split('_')[-1] + tkboxes = np.load(npypath) + + trackboxes.append((CameraType, tkboxes)) + + period = devide_motion_state(tkboxes, win_width) + periods.append((int(CameraType), period)) + + + + '''===============读取重力信号数据===================''' + seneorfile = os.path.join(datapath, 'sensor.txt') + WeightDict = read_seneor(seneorfile) + + weights = [(float(t), w) for t, w in WeightDict.items()] + weights = np.array(weights) + + + '''===============重力、图像信息融合===================''' + spath = os.path.join(savedir, f"{eventname}.png" ) + tmin, MotionSlice = state_measure(periods, weights, spath) + + + + + # 第一次运行时用于更改图像文件名 + # rerename(imgpath) + # rename(imgpath, tmin) + + + # splitevent(imgpath, MotionSlice) + + + + +def main(): + # runyolo() + + show_seri() + + +if __name__ == '__main__': + main() + + # imgpaths = r"\\192.168.1.28\share\realtime\result\eventdata_1728978106733" + # rerename(imgpaths) + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/track_reid.py b/track_reid.py index e927a77..3e687be 100644 --- a/track_reid.py +++ b/track_reid.py @@ -150,6 +150,8 @@ def yolo_resnet_tracker( save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos + is_save_img = False, + is_save_video = True, classes=None, # filter by class: --class 0, or --class 0 2 3 @@ -166,9 +168,7 @@ def yolo_resnet_tracker( vid_stride=1, # video frame-rate stride data=ROOT / 'data/coco128.yaml', # dataset.yaml path ): - source = str(source) - save_img = not nosave and not source.endswith('.txt') # save inference images - + # source = str(source) # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) @@ -209,7 +209,6 @@ def yolo_resnet_tracker( for i, det in enumerate(pred): # per image im0 = im0s.copy() - save_path = str(save_dir / Path(path).name) # im.jpg s += '%gx%g ' % im.shape[2:] # print string annotator = Annotator(im0.copy(), line_width=line_thickness, example=str(names)) @@ -228,7 +227,14 @@ def yolo_resnet_tracker( tracks = tracker.update(det_tracking, im0) if len(tracks) == 0: continue - tracks[:, 7] = dataset.frame + + if dataset.mode == "video": + frameId = dataset.frame + else: + frameId = dataset.count + + tracks[:, 7] = frameId + '''================== 1. 
Store the dets/subimgs/features dicts ============='''
                 imgs, features = inference_image(im0, tracks)
 
@@ -242,7 +248,7 @@ def yolo_resnet_tracker(
                     imgdict.update({int(bid): imgs[ii]})        # [f"img_{int(bid)}"] = imgs[i]
                     boxdict.update({int(bid): tracks[ii, :]})   # [f"box_{int(bid)}"] = tracks[i, :]
                     featdict.update({int(bid): features[ii, :]})    # [f"feat_{int(bid)}"] = features[i, :]
-                TracksDict[f"frame_{int(dataset.frame)}"] = {"imgs":imgdict, "boxes":boxdict, "feats":featdict}
+                TracksDict[f"frame_{int(frameId)}"] = {"imgs":imgdict, "boxes":boxdict, "feats":featdict}
 
                 track_boxes = np.concatenate([track_boxes, tracks], axis=0)
 
@@ -256,20 +262,21 @@ def yolo_resnet_tracker(
                     elif id >=0 and cls!=0:
                         color = colors(int(id), True)
                     else:
-                        color = colors(19, True) # 19为调色板的最后一个元素
-
+                        color = colors(19, True)  # 19 is the last color of the palette
                     annotator.box_label(xyxy, label, color=color)
 
-            # Save results (image and video with tracking)
+            '''====== Save results (image and video) ======'''
+            save_path = str(save_dir / Path(path).name)  # keeps the original file name and extension
             im0 = annotator.result()
-            save_path_img, ext = os.path.splitext(save_path)
-            if save_img:
-                # if dataset.mode == 'image':
-                #     imgpath = save_path_img + f"_{dataset}.png"
-                # else:
-                #     imgpath = save_path_img + f"_{dataset.frame}.png"
-                # cv2.imwrite(Path(imgpath), im0)
-
+            if is_save_img:
+                save_path_img, ext = os.path.splitext(save_path)
+                if dataset.mode == 'image':
+                    imgpath = save_path_img + ".png"
+                else:
+                    imgpath = save_path_img + f"_{frameId}.png"
+                cv2.imwrite(imgpath, im0)
+
+            if dataset.mode == 'video' and is_save_video:
                 if vid_path[i] != save_path:  # new video
                     vid_path[i] = save_path
                     if isinstance(vid_writer[i], cv2.VideoWriter):
@@ -396,8 +403,8 @@ def run(
         imgshow = im0s.copy()
 
         ## ============================= tracking 功能只处理视频,writed by WQG
-        if dataset.mode == 'image':
-            continue
+        # if dataset.mode == 'image':
+        #     continue
 
         with dt[0]:
             im = torch.from_numpy(im).to(model.device)
@@ -482,7 +489,12 @@ def run(
                 tracks = tracker.update(det_tracking, im0)
                 if len(tracks) == 0:
                     continue
-                tracks[:, 7] = dataset.frame
+
+                if dataset.mode == "video":
+                    frameId = dataset.frame
+                else:
+                    frameId = dataset.count
+                tracks[:, 7] = frameId
 
                 '''================== 1. 
存储 dets/subimgs/features Dict =============''' imgs, features = inference_image(im0, tracks) @@ -496,7 +510,7 @@ def run( imgdict.update({int(bid): imgs[ii]}) # [f"img_{int(bid)}"] = imgs[i] boxdict.update({int(bid): tracks[ii, :]}) # [f"box_{int(bid)}"] = tracks[i, :] featdict.update({int(bid): features[ii, :]}) # [f"feat_{int(bid)}"] = features[i, :] - TracksDict[f"frame_{int(dataset.frame)}"] = {"imgs":imgdict, "boxes":boxdict, "feats":featdict} + TracksDict[f"frame_{int(frameId)}"] = {"imgs":imgdict, "boxes":boxdict, "feats":featdict} track_boxes = np.concatenate([track_boxes, tracks], axis=0) @@ -535,7 +549,7 @@ def run( if dataset.mode == 'image': imgpath = save_path_img + f"_{dataset}.png" else: - imgpath = save_path_img + f"_{dataset.frame}.png" + imgpath = save_path_img + f"_{frameId}.png" cv2.imwrite(Path(imgpath), im0) @@ -664,23 +678,37 @@ print('=======') def main(opt): check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) - - p = r"D:\datasets\ym\永辉测试数据_202404\20240402" - optdict = vars(opt) + + p = r"D:\datasets\ym" + p = r"D:\datasets\ym\exhibition\153112511_0_seek_105.mp4" + files = [] k = 0 if os.path.isdir(p): files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) for file in files: + optdict["source"] = file run(**optdict) k += 1 - if k == 2: + if k == 1: break elif os.path.isfile(p): + optdict["source"] = p run(**vars(opt)) + +def main_imgdir(opt): + check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + optdict = vars(opt) + + + optdict["project"] = r"\\192.168.1.28\share\realtime" + + optdict["source"] = r"\\192.168.1.28\share\realtime\addReturn\add\1728978052624" + run(**optdict) + def main_loop(opt): check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) @@ -725,8 +753,9 @@ def main_loop(opt): if __name__ == '__main__': opt = parse_opt() - # main(opt) - main_loop(opt) + main(opt) + # main_imgdir(opt) + # main_loop(opt) diff --git a/tracking/dotrack/__pycache__/dotracks.cpython-39.pyc b/tracking/dotrack/__pycache__/dotracks.cpython-39.pyc index 3cc23363c7954b87f6c636b57c6828951fbbc384..c349263b65eb5db5c0029614a7d36f53fa3d876c 100644 GIT binary patch delta 3158 zcmZWrYit|G5x%|Sk;kVbilRixvS^!rP_`vYmSNXXlQ^~ntFh&W6hE#u=o9ZmpCmrC zcd})54@4syah$qQympf&NFgC;Qx_;2mu-_2C{P3~kfN=dKF}YBqWx3!Z-4Yp;U7h3 zj+S2{vER+k%+Aiv&d%;%r~fn`)1uLk0KZTB(}jPZy%+nBEIwGzdpp~C%AXM{8h^q6 zXYznYJJU^cAL!98gfAhy$X^WH>Q7r2%ay!F?OueOZ4@hY($EV=Po-2?Gwha9@(-bb zVZOAdwf2TGAd&~QUxpOXe+=1Uwfm7}F?E2ZaO(5k# z3sSGtK5w9eoaN6o-RUVJr-V>Os36P&xMbQTXH=g}wJ9}#IAfGl{=cRz2?hdpEJB=XV-XZZILr@PhgLo2zWSyCg|(M`*K+=B^nRI?40Z*3`)3$?df zDpFVl1Fi%jTHx{21fjLLlqJPpMZ0?u4gv6;ou`i+#a<`E1q2OXcsv`YTd}tv;W>ms zgfAmtuv}k$#;njW>`x11T6c4B>>NRIHu^xrw|qpzK(Dm;Z1~dm?vB4VW3Yyv*G{>@sZG=(xB{6vY~;{ zfU?e$=ih2dkrKZlcJT9=CX(XiOn*XexhkkpJ*l_CH>S6Z3E6i3-OTSdVTwTj+u-BH z#r4mX%-K1k9;qy_U4CctJ~CdrznNj6Uf#Cl1uu#yqi?T{qhCXcp}u4pHZ1^^_1D7{ zggnSk=SXd;SIiR=REmk>Ssr_TuAp%}y`nQo! 
z_|g6hiE!Hg`xA>sU>;{#3kX-(#v7FUQ4V;qn$xo!n*IDRHA`0mJoA!f`b$x)VqAAS@$1 zT`}FTMyYnCf0AlNVUlytkIt?Bus8*wVJ=zw z!H!NG!h|V|nm8q4e3-E8V-N#JvSnDWFckpW2U3Okrh=%Yi#J6_wnI#+E)odj=sD5x zI{`avM_6Eyc)Wd%!U9auCEa&T#PFT-IYB4ngq;YJS%^ihi7d#%D3gVhpv!x~r$J%% z)a*;%RI)c!)SD`5i_0+>MW$tNC8qmXl*I-G;8uaHOVj9g!@DRM=(jk@HTGL=xw;%@tt?p}dO-Js zspb`#2&@TWqB?DB2BcuhI7_j%r}crG04u2#(P@V@wXpUT*bMllR>-utMCkZ+VL8cK zF!7ei>^QXaYr-Va6}=j~s(4`sPtn&!;&iN;Ho-c;p|qoT(s75G()p6_X`3O&No8tk?X=GLVb+9CC>WBp2+*GNIymkeE?7^ zA{VtwM$VXr``Xa!r^9H zmm1dGjNSWC4ZfE&%Pg1hMKnIV5H+pzLy^8eolVv6peHV9^z9D_TLIkgglS(kEhASd z+jJAMHzH(E7Bot8MWc!j3%Uilcn{Id2zV{Ikx6*M94wdY@*K6?=*gk+lewWY<3lG$ z4;*ou4<0@+GMYPZ{P@XZXL4gBM~@#Faw9s`F0YQD-Dqh$!c_z;Gj6~vUZy5Y_#^0L zJE7=Q;c5#VyJXNAtuhL42d@I%#{uZB3gxw!GfVhHtd}CZ*1X`;ex!CI3?S?QP=lae z*H-Y;k{R^^r?R6 z!a%Rr?@GW^;h{I8&%1$<`o7k4;0yG~yB~s&c6Qe8W}EqkQ|JT54Tirl@j#f1LqU*0{)PN9quS`E$O(Nji40MhcNeX%6O1eOU-Y1D- z^ZID6U_(k+*=hcwY{kPsAFzjN2R6~`6&Dhl*H^(e^l9V&%IYbip7ai)%aF66zN)aI z^T?+v-UEz=m`6-mGYjp@(1UO2o z^%HSX^>>$K#>@&o9No;@|9NtOBv09fpx$84`-ZGcyQq3&qq7zqb5%`9U$|rr*QvE> zishWQoAR!XMJX3s^D{XMbL@U*xMmg(rdkY>*3#^TNlMGa=ze@u3xGxVq{Q&JR>z-* zw-e}>5EWL6TPb^n4?w~&g*!cGT6`4#4A2Icbf@K<9|t6q`Ls~Zrs?j^pz}&=t-?lV zJhG1+ppPS=PzuRY5?XcXOoV$XH@{ub_mL4MoM77(R+*I1ajlBKMqg}+2TN0ppz7aJ zew=>YGRRI)BKmU8Ui4z6O~}7RIv)*(k05;vI1gL`t^gkaCul)!WW)3{8ukoJA$_g3 ziA~bGN;4&5wJb<4#d_Rh8?**mj6IIxQpvG2q7pk5-a^)Vt6-G(u1V!2ooVl5hn>6a z=djnUogI5hb3u&X_@|CvfVChYTzSLhy0kQJc?Om89>|Af>Un+(33RP9%x=<3=fyVo z+ygvQ!0$jPg+8I9TA1CYg7%OtP+$B_wp4m$%kkaps`Fd?A{)ZdYD{=^`>bW~3Y5MN zV9rIiZR!!euVhBfd z03T3g*F0OMce*~4-xW_pyk9~78XxDR6Mlu=Cu>_n{{yN1!DQs2G~_$QE``g=S=

)?yI@uZ}dSWafjj)Z?%9erI=+4U{H85dsqkBq<-I6bMNmDW6J^Xr>6uSv$K9_If+^CM-%6 zRY7W{o>HHBBkHBORn0%>sZ!f(rH+t#tBNWlF1_^9z8P<}Ap&(*^WO9On)lwkdGqbb zKSpw9Hk%UQ_eElVb@1SN?(Nmj;mZ=RU?EF%5f(X?umo6gWlzBhVlvrPy#!8hT5&Zm ziIYgMlXhwiJxy;23mFUT6>t_f*);?>yO6_qOSKYimsh|AuZW9at6G{{^15+160QkW z(n>wa3s%Z5tVy`X5@!TGz5e7QO-EF|V}`Z$XIIhv=n%SHv}c&cs)(J$y~uLf!85)}teQ8Y)+Z4xM_%Bq3Fudi1+EnCHfM=Ai_j937H|R}(;~u*P5oH2|UT6oq04mD(q) zp-@eQ`xv0UEgCF0KOH#In4sfH!qng=CdmkNbnw zn%`hCo@BrXZLblYjH4cG)T@sV1h0`ZRoklCOyl|A5c2bX0O%T(DvQIAtVe9qqEZkN znrN7$0X3&xQ#Yq4M#qh7)1$L9W1}}FXfmu3o9*vfG<$b)dSYhMxH5HXbb6Mi=A3ZJ z32ehBdV=f$=U8^U>JW#+R3dhW6LU`8B(0%s0yd60?IQvs!b@ES>K#}?27rLds4T$} zGt%R-5FaHrP3@tx4$zbdaf26z+D{1?)4hNFf?h>W6ej4lodR2BY}4Oz#5w zDn#Ea=Z2XS8Zi0Lv5X)zLz^4vh-VkucFaDA&5o2U4zD$hIv@29HgTSviP$6T<32R2ptXsMiC$HA{-J)uh z<4rNHUJrA^E7d9C{n+mdtuJaRr86>-m(jZTi990^9wXv0zA+_Z+kE^eQzt_V3^2gA zd3CQ!tUQ}Mg1VzmlOLiz(ND=c{pUOCyj@smRI9jHTp% z-UR_U9etI%gmTfZxyhqgG2u!M#b_8yj^Es!$Ozc1Y)$1Kh&F2g!g>(8^AbZML4ckM z06EoW5H_;5g=~b8%K&Z>)=qjJ7v@a3!JbX3wh@m+a+4{IGcXZlihHh20ZHYG-)K>d z4bLVF1Tx9w(@c^uNj31Hag&Vo9srzP@SS?wonDI5eLs3w98s7r9DQAU(D&*Q(ag>e zzT4_A?L}xTI@f)Cfb@cmF5^_iC-nw64g($?jIO4~8$?&)Ef%6LyAOB2>X;0I^|R>5 z?vICmf7?@M<+^=#yo53jN}hp=f#_*`NqPFx09BBaT{A_GeRXx1=zuU2f=HH;mp z>|-kYp0O97UA>?PKtq{~me+1*r?q2%6-^;bu9s=TFf6}f7&)rIKxwq%2G}2!dM~fC z_aUsg&K!|JnQDeH@3^*M&;)dX?=IT>0kPp=aH7(TUlJ9b6yOjx)3Ijx?cU-I$R1Oi u;v8cRCVSjDERY=;{8!_n+6B+Iny!6O1eJi*MpiTgx$u7xC{^l08R=i8tr2^L8dlR%m#o3w80$PEP4A`#Mx7E*~wz#_VAyz69}UGHjk zO&bY8r6R!vq-suxzaR%xP%cPCRS$4PT;RYQIBWAr=J!5k ze_#CS?0{1&>N)oN?ZaciCJ+?0-F@bp<*damt7^u zhpMH9nx*l5KF~wMGEnYv&Q)FQqnxXG>Rr()xWel>Q@{7kn`8v7qIF`UHT~n9l(P(G zA0&^^x{@mIqq~TV39G|-#=sP*&^SlY21OkV46#mb2`i{Vz7?)cQzh7lZhMKP1Rc*w z6RM=xX>TW3VMt3&wxXCc#72ob4uJPlOq9sB*xXx|AE2iVwxuZDj|>2gDlu&mc4bn_ zcc)K)S_hFa0IiRk$#8>el|0CwYy!eWRNMAlJ4v0?RaOeaXS;u~ZihysPAnu8}|WQ_H&uOU$|}P`QJ< zy^v~t!g>4cW5YC#hM8S@hM5@ilWegJ7NLVZFO1&Hl7CVntt`gR+6vOim|k!3q?Ik+ zKdNlLJ&itY3cl5b=em~;(A4~hnVHe1Lb`IR|G>GH6do6RiXw0&a z>lm+~8u{9|K5+)joRJ9{(4uNPaFWFD?2PSA7P=yuB%hV0YA{A7t8TL8#2%I1bSL(( zM7}F6pmFk7X$VC@j3EA`4TFUhvh4CRCciEoX?gf z75%7pw7@V|oxR534WS6l#pi$n!cFpdWqjlUYy^zh@&`N0tbOF=zi1d2aaXg3pi{ z=4M}e%$$w78{O)1{4eDG@T>oihnK zf4n2$C4xp@e8*DVDC;4X88G{v>fB;)urF-ZaENi}>c@3aUkD?&7kC#1hDZubNHSx< J{$*4U{{>W;S?T}) diff --git a/tracking/dotrack/__pycache__/dotracks_front.cpython-39.pyc b/tracking/dotrack/__pycache__/dotracks_front.cpython-39.pyc index 73205fda069cf19c35d9ef21320877bff369368d..e7a572ca0712905d9f78239109ebd7858827482e 100644 GIT binary patch delta 1729 zcmaJ>O-$TI6!zF&|17(kg(VP@5TKMKCIl#iv`x}Pg#R}D2$EK@Qdwp_`xo!p9eb%3 zK{*g9Ql(ZxbB!wKrRuSz$KHCYRH;&X8ugMR$6R}9-`GonDp5=RX6DU%Gw=Im=6!kj zrIE4I>7)SP4{A^G=dQiX#T^$$j$jH7azq!IC~8Ghf>v_nYQjt)0VlAsi}uw6p`tmc zOE7h?=(`A7y^=Ir9N9^@scPCxBLVt~o2i~L&mdt=a8yToCO8^S?@DIY5!VDm-yBO$ z5p0Lp>Gf)R_3N5}n7m?#rM++8M|bxA8fkNMC;3b;)#rjNm>R^i(6h)uH|afeEx9G^ zD+j`sV5zi%?sq02e?wy8@F+v;3>An52BOhl&1z{J3LPb6hx1rrn zY=MZ51Qz8$ej;v*+tQW<>B~!-{Xxm|`Q{@WtPofIa3oH{Duh*k*sm3j{)WYm{sAyl zCgnHAAkiw>!M4MsAS6unZQ_GYEW5C{Ix{(K&8<#8UYnYHFvIk)L@;dOIxPKgesyMT z-nzfAG`afNkVz*j$fR<$$RvV+{Cc@&lSWe~m?IJUv(UQ$f0KO&>JT?O9dDN8yju8> z*!icy@M{GRe-&S#KAkU&!vC(62J=<;Nrpa`z5b{>$j9-?@Lsed_j@Pyd!@G1>xs7~ zxCh*ZOgZ=jxZve_I0b5v)1q%2M|aa+<+A)eA`<;AaqiM-{_maizw+)d-%TX^NLfoc zunGj@s<(kp-S2K3|AjK$)66ty2Z5I_!)3NUFNYvR#!&-0%^H2LMbh zxSk!3k1-wB>QzkOM9B)Dkn^73U|InRRvn%gS>_>T6k?NN;KB8b*1p9}Efud^Yf59} zVhSJ7zSeQHLLarR$HgY15UG`djf%MG{}uhSbq(qCYTLjLFGYmENWzN_;VjA504!}; zk-~ZGx|Vg~MV;p^d{LqTw80-F0W_4-G*!E;jYf8(m8q8HczKAu1k)@5Wc@~@11~RO z$t|xFafeT7mQ^Ub*s_=kyghdVM+FGi!hosydR&PVC`n);eNlwtHE#tknPy8gfL8?t 
nB-V(hb|U9DLTGoYo>O=6T@gHjHqKm5Xb7?w+W-|Y1-*X(vCWH# delta 1301 zcmaJ=&rcIU6yBNL?oLZvgcA9o6$nsWKp`rMikOH*F2Df|+L&yX&K9cMZZo@R>V+6h zOiYX+bM@fC3l}dO^sn&fVGbsqjM0-PZ#r+fP%gDeU+2B|y?O8ZUgw+rs*rUvnUp3! zncsiP>Av0U$@y97QLU&^M6r*GnCjH<^{P=ckS5OwKT$P{Cem(e)TGu6jar`ZTrXM_ zuW2^feS3BH>lJj7_o1oOOU=-VmJBEPePp9a{v1v4Pv~Z^A$!=mz~`m~+7PJBEE!-H z@4}0yi?8DuWb)Ve9RG~R^D5N=`A%sv@*6_AhO+w8hKURd6vekm zhpD=>JPm-C;lvC^`r&v2OtUMT4sjz*+&j|4KbWI4fUuFsR>JkI!1q?SqXm~mZL13h z)p^byrm|n0=t%~yd!htsykBM+cvy<00(*nTTRS{AlQ!M zVyog!GJh=|Gaa*Y{!4F3)k?EsTP(-FCznh`A;)J@3zOp5fj-oUm|J=jHo2XKDp;o= zC11)>DK~od-=~)K%Xt}mn!5wop^_gCRl=b~#sW6HhZ%w@0N7Y0!Y#M%i3E)rb&p|w zCOd%!_?>KH5H==Ew_f*ZROnHK+Igl%8iVoz|CPOnQhd1cL64HTrS+)HukP=5F5?Ua z=w)ea#IEq4T_^jlgVP-0hY4Abd4 zkT6O?y&*{1bE9qMh3qDjEUOZiZH9sT(%W z08x(*n8RN(ghxzJ+9KD;G=r#3gylFiC^?QuNY?cR13?6#LA-b!+z#b9^JNy)VzNrj a)+$=Vleua@w|#FOwxK#234+rojsF3a0T2EF diff --git a/tracking/dotrack/__pycache__/track_back.cpython-39.pyc b/tracking/dotrack/__pycache__/track_back.cpython-39.pyc index 661bab5b9854605ea23e7a4b5321fe10a56cb8fa..4969ac8a96ac059c8157e20a911b25bf3f1cdbf5 100644 GIT binary patch delta 211 zcmexn_|1?vk(ZZ?0SIK`Wzt`6dMvKXtC7c0Z4mY3x delta 222 zcmexn_|1?vk(ZZ?0SIJrf2Doh$ZN^UdyC5{zceo;AhRMh=ho(6R(nRqTbn1dEnsK# z*lfc0kcrWDvzQ<&Gh^sv8IcH%FrZ#drlPdTH6rq&u|Uo(-pu0gjQpHb&phYEqLL!} z$s0sOMfHI)MFt?k5JVV(2;420AfX>LJfk?mwP2?fSmliek<8LcO8mT(3D=b1ZH diff --git a/tracking/dotrack/__pycache__/track_front.cpython-39.pyc b/tracking/dotrack/__pycache__/track_front.cpython-39.pyc index 08dd9f85f9d93401eb87b77a500152a7ff65e7ae..8baa7a1090cfb1a467026ed7b62fd204f03c0516 100644 GIT binary patch delta 412 zcmXAjJ5R$f6oqr`BzDs@eW*xObn1ZU!T=Lu=~gDhikGOwDYT>yxevqwLlF`KLzRIQ z{y|YB7!dOvfnT)03iQMwx{>s!TONmZuLX#j# z0D>%(3M#481HEGiqhksalH3SbhYJpx&;8T{Zmm15ot9A2gpI_W0IJ>22&b+|Q?d(9 zGT)p~k)1Yc(^)B1g;?Kf43E5NzIgv$2O!ZZc>-99KIsj*@XYXGJ^C;<0czngdqnig zwxAwaJRoRJtep(OeXB$PiX!3M`i0$ap8kfb=)ty<(zEje*WoPta$dmBz`=)8r^E}K z;oauCGLtWIF>_tn^ISJJx}#42ROJ!J)EjTaT=s^l;T;bYMW*y5V13*?XdNgcaolyA yt-!}&ru;!K82d><`Q70lX5L68<$P@ZqeYaE@GZAi%;CIlaNs0IY$yN+ll}pf;c5#2 delta 316 zcmWlTze~eV5Xax;z2rS(ei(64I(4!XL~s(TI0zN0qss#cAuldXQh8|yXG;eMA!KlJ z^FPSo=%TBmLvSx5ICOB;>mA(pK3~2ZH*e3{UgWwq!MYf~UVU$sUaQwbQbZC3%1A*P zs5GS+lMIMr%1q77l2*ng2hu0XQhZ7jPxvjBqP#!0JJ48z^CZfP@_iHHGT5jY^Z{sn zX1oCSwZk5eswXpnZf#q80Eg8Pe@5$<@54#GD^dgD(LQx4{?aitplDhx>^~S(i`6ff z)t`>N49&eCxT`+=w@U{zg@;2>uO-$6$375DTgwwG4G%*#io>um^GSAnqXV?1UkuYE n()muDPKt4&wSjIU)M%`ucvxtLs7)MIH`sOrF3c3*z#9Drf|yF# diff --git a/tracking/dotrack/dotracks.py b/tracking/dotrack/dotracks.py index e1341e7..a3ec32b 100644 --- a/tracking/dotrack/dotracks.py +++ b/tracking/dotrack/dotracks.py @@ -59,21 +59,21 @@ class ShoppingCart: @property def incart(self): - img = cv2.imread(str(curpath/'cart_tempt/back_incart.png'), cv2.IMREAD_GRAYSCALE) + img = cv2.imread(str(parpath/'shopcart/cart_tempt/incart.png'), cv2.IMREAD_GRAYSCALE) ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY) return binary @property def outcart(self): - img = cv2.imread(str(curpath/'cart_tempt/back_outcart.png'), cv2.IMREAD_GRAYSCALE) + img = cv2.imread(str(parpath/'shopcart/cart_tempt/outcart.png'), cv2.IMREAD_GRAYSCALE) ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY) return binary @property def cartedge(self): - img = cv2.imread(str(curpath/'cart_tempt/back_cartedge.png'), cv2.IMREAD_GRAYSCALE) + img = cv2.imread(str(parpath/'shopcart/cart_tempt/cartedge.png'), cv2.IMREAD_GRAYSCALE) ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY) return binary @@ -520,8 +520,7 @@ class doTracks: 
             mergedTracks.append(cur_list)
 
         return mergedTracks
-        
-        
+
     @staticmethod
     def join_tracks(tlista, tlistb):
         """Combine two lists of stracks into a single one."""
@@ -541,6 +540,93 @@ class doTracks:
     def sub_tracks(tlista, tlistb):
         track_ids_b = {t.tid for t in tlistb}
         return [t for t in tlista if t.tid not in track_ids_b]
+
+    def array2frame(self, bboxes):
+        '''Group the flat bboxes array into one array per frame_index (column 7).'''
+        frameID = np.sort(np.unique(bboxes[:, 7].astype(int)))
+        fboxes = []
+        for fid in frameID:
+            idx = np.where(bboxes[:, 7] == fid)[0]
+            box = bboxes[idx, :]
+            fboxes.append(box)
+        return fboxes
+
+    def isintrude(self):
+        '''
+        boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
+                 0   1   2   3      4       5     6        7           8
+        '''
+        OverlapNum = 3
+        bboxes = self.bboxes.astype(np.int64)
+        fboxes = self.array2frame(bboxes)
+
+        incart = cv2.bitwise_not(self.incart)
+        sum_incart = np.zeros(incart.shape, dtype=np.int64)
+        for fid, boxes in enumerate(fboxes):
+            for i in range(len(boxes)):
+                x1, y1, x2, y2 = boxes[i, 0:4]
+                sum_incart[y1:y2, x1:x2] += 1
+
+        # pixels covered by at least OverlapNum boxes
+        sumincart = np.zeros(sum_incart.shape, dtype=np.uint8)
+        idx255 = np.where(sum_incart >= OverlapNum)
+        sumincart[idx255] = 255
+
+        # pixels covered by at least one box
+        idxnzr = np.where(sum_incart != 0)
+        base = np.zeros(sum_incart.shape, dtype=np.uint8)
+        base[idxnzr] = 255
+
+        contours_sum, _ = cv2.findContours(sumincart, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+        contours_base, _ = cv2.findContours(base, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+        have_existed = []
+        for k, ct_temp in enumerate(contours_base):
+            tmp1 = np.zeros(sum_incart.shape, dtype=np.uint8)
+            cv2.drawContours(tmp1, [ct_temp], -1, 255, cv2.FILLED)
+
+            # determine the containment relation between the two contour sets
+            for ct_sum in contours_sum:
+                tmp2 = np.zeros(sum_incart.shape, dtype=np.uint8)
+                cv2.drawContours(tmp2, [ct_sum], -1, 255, cv2.FILLED)
+                tmp = cv2.bitwise_and(tmp1, tmp2)
+                if np.count_nonzero(tmp) == np.count_nonzero(tmp2):
+                    have_existed.append(k)
+
+        inIdx = [i for i in range(len(contours_base)) if i not in have_existed]
+        invasion = np.zeros(sum_incart.shape, dtype=np.uint8)
+        for i in inIdx:
+            cv2.drawContours(invasion, [contours_base[i]], -1, 255, cv2.FILLED)
+        cv2.imwrite("./result/intrude/invasion.png", invasion)
+
+        Intrude = len(inIdx) >= 1
+        print(f"is intruded: {Intrude}")
+
+        return Intrude
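For reference, the frame-grouping that array2frame() performs can be exercised on its own; a minimal sketch with made-up boxes, where column 7 holds frame_index as the isintrude() docstring above documents:

    import numpy as np

    # Three illustrative detections: column 7 is frame_index.
    bboxes = np.array([
        [10, 10, 50, 50, 1, 0.9, 0, 0, 0],
        [12, 11, 52, 51, 1, 0.8, 0, 1, 1],
        [200, 30, 260, 90, 2, 0.7, 0, 1, 2],
    ])

    # Same logic as array2frame(): one array of boxes per distinct frame id.
    frame_ids = np.sort(np.unique(bboxes[:, 7].astype(int)))
    fboxes = [bboxes[bboxes[:, 7] == fid] for fid in frame_ids]
    print([len(b) for b in fboxes])   # [1, 2]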
diff --git a/tracking/dotrack/dotracks_back.py b/tracking/dotrack/dotracks_back.py
index 19956d9..dd5d67b 100644
--- a/tracking/dotrack/dotracks_back.py
+++ b/tracking/dotrack/dotracks_back.py
@@ -5,8 +5,15 @@ Created on Mon Mar  4 18:36:31 2024
 @author: ym
 """
 import numpy as np
+import cv2
 from tracking.utils.mergetrack import track_equal_track
 from scipy.spatial.distance import cdist
+from pathlib import Path
+
+curpath = Path(__file__).resolve().parent
+parpath = curpath.parent
+
 from .dotracks import doTracks, ShoppingCart
 from .track_back import backTrack
 
@@ -18,10 +25,26 @@ class doBackTracks(doTracks):
 
         self.tracks = [backTrack(b, f) for b, f in zip(self.lboxes, self.lfeats)]
 
-        # self.similar_dict = self.similarity()
+        # self.similar_dict = self.similarity()
+        # self.shopcart = ShoppingCart(bboxes)
+
+        self.incart = self.getincart()
+
+    def getincart(self):
+        '''The back-camera in-cart mask is the union of the cart interior and its edge band.'''
+        img1 = cv2.imread(str(parpath/'shopcart/cart_tempt/incart.png'), cv2.IMREAD_GRAYSCALE)
+        img2 = cv2.imread(str(parpath/'shopcart/cart_tempt/cartedge.png'), cv2.IMREAD_GRAYSCALE)
+
+        ret, binary1 = cv2.threshold(img1, 250, 255, cv2.THRESH_BINARY)
+        ret, binary2 = cv2.threshold(img2, 250, 255, cv2.THRESH_BINARY)
+
+        binary = cv2.bitwise_or(binary1, binary2)
+
+        return binary
 
-        self.shopcart = ShoppingCart(bboxes)
 
     def classify(self):
         '''Classify the elements in tracks.'''
diff --git a/tracking/dotrack/dotracks_front.py b/tracking/dotrack/dotracks_front.py
index 60101f5..a61e39a 100644
--- a/tracking/dotrack/dotracks_front.py
+++ b/tracking/dotrack/dotracks_front.py
@@ -4,7 +4,13 @@ Created on Mon Mar  4 18:38:20 2024
 
 @author: ym
 """
+import cv2
 import numpy as np
+from pathlib import Path
+
+curpath = Path(__file__).resolve().parent
+parpath = curpath.parent
 # from tracking.utils.mergetrack import track_equal_track
 from .dotracks import doTracks
 from .track_front import frontTrack
@@ -16,6 +22,14 @@ class doFrontTracks(doTracks):
         # self.tracks = [frontTrack(b) for b in self.lboxes]
         self.tracks = [frontTrack(b, f) for b, f in zip(self.lboxes, self.lfeats)]
 
+        self.incart = self.getincart()
+
+    def getincart(self):
+        img = cv2.imread(str(parpath/'shopcart/cart_tempt/incart_ftmp.png'), cv2.IMREAD_GRAYSCALE)
+        ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY)
+
+        return binary
+
     def classify(self):
         '''Classify the elements in tracks.'''
diff --git a/tracking/dotrack/track_back.py b/tracking/dotrack/track_back.py
index 20512bc..463c642 100644
--- a/tracking/dotrack/track_back.py
+++ b/tracking/dotrack/track_back.py
@@ -49,7 +49,7 @@ class backTrack(Track):
             self.incartrates = incartrates'''
 
         self.compute_ious_feat()
-        # self.PCA()
+
 
     def isimgborder(self, BoundPixel=10, BoundThresh=0.3):
diff --git a/tracking/dotrack/track_front.py b/tracking/dotrack/track_front.py
index 0d957dd..1b28685 100644
--- a/tracking/dotrack/track_front.py
+++ b/tracking/dotrack/track_front.py
@@ -5,9 +5,15 @@ Created on Mon Mar  4 18:33:01 2024
 @author: ym
 """
 import numpy as np
-from sklearn.cluster import KMeans
+import cv2
+# from sklearn.cluster import KMeans
 from .dotracks import MoveState, Track
+from pathlib import Path
+
+curpath = Path(__file__).resolve().parent
+parpath = curpath.parent
+
 
 class frontTrack(Track):
     # boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
@@ -36,9 +42,6 @@ class frontTrack(Track):
         self.HAND_STATIC_THRESH = 100
         self.CART_POSIT_0 = 430
         self.CART_POSIT_1 = 620
-        
-        
-        
     def is_left_or_right_cornpoint(self):
         ''' based on all(boxes),
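Both trackers now build their region-of-interest mask the same way: read a cart template image, binarize the near-white pixels, and OR the pieces together. A standalone sketch of that recipe; the template file names follow the diffs above, while the directory layout is an assumption:

    import cv2
    from pathlib import Path

    parpath = Path(__file__).resolve().parents[1]   # assumed repo layout

    def load_mask(name):
        # Grayscale read, then keep only near-white pixels (>250) as 255.
        img = cv2.imread(str(parpath / 'shopcart' / 'cart_tempt' / name), cv2.IMREAD_GRAYSCALE)
        _, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY)
        return binary

    # Back camera: the in-cart region is the union of interior and edge band.
    incart = cv2.bitwise_or(load_mask('incart.png'), load_mask('cartedge.png'))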
diff --git a/tracking/module_analysis.py b/tracking/module_analysis.py
index 16050dd..03a5579 100644
--- a/tracking/module_analysis.py
+++ b/tracking/module_analysis.py
@@ -22,9 +22,9 @@ from tracking.dotrack.dotracks_back import doBackTracks
 from tracking.dotrack.dotracks_front import doFrontTracks
 from tracking.utils.drawtracks import plot_frameID_y2, draw_all_trajectories
-from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output
+from tracking.utils.read_data import extract_data, read_deletedBarcode_file, read_tracking_output, read_returnGoods_file
-from contrast_analysis import contrast_analysis
+from contrast.one2n_contrast import get_relative_paths, one2n_new
 from tracking.utils.annotator import TrackAnnotator
 
 W, H = 1024, 1280
 Mode = 'front'  # 'back'
 ImgFormat = ['.jpg', '.jpeg', '.png', '.bmp']
@@ -32,6 +32,7 @@
 
+
 '''Call tracking() to obtain each target's trajectory with the local tracking algorithm, so the local tracker can be compared against the on-site tracker.'''
 def init_tracker(tracker_yaml = None, bs=1):
     """
@@ -143,7 +144,7 @@ def do_tracking(fpath, savedir, event_name='images'):
     # trackerboxes, tracker_feat_dict = tracking(bboxes, ffeats)
 
     '''1.3 Create two folders: (1) images with boxes drawn on them; (2) sub-images of the boxes along each motion trajectory.'''
-    save_dir = os.path.join(savedir, event_name)
+    save_dir = os.path.join(savedir, event_name + '_images')
     subimg_dir = os.path.join(savedir, event_name + '_subimgs')
     if not os.path.exists(save_dir):
         os.makedirs(save_dir)
@@ -237,14 +238,15 @@ def do_tracking(fpath, savedir, event_name='images'):
                 subimg_path = os.path.join(subimg_dir, f'{CamerType}_tid{int(tid)}_{int(fid)}_{int(bid)}.png' )
                 cv2.imwrite(subimg_path, subimg)
 
-        # for track in tracking_output_boxes:
-        #     for *xyxy, tid, conf, cls, fid, bid in track:
-        #         img = imgs[int(fid-1)]
-        #         x1, y1, x2, y2 = int(xyxy[0]/2), int(xyxy[1]/2), int(xyxy[2]/2), int(xyxy[3]/2)
-        #         subimg = img[y1:y2, x1:x2]
-        #         subimg_path = os.path.join(subimg_dir, f'{CamerType}_tid{int(tid)}_{int(fid-1)}_{int(bid)}_x.png' )
-        #         cv2.imwrite(subimg_path, subimg)
+        for track in tracking_output_boxes:
+            for *xyxy, tid, conf, cls, fid, bid in track:
+                img = imgs[int(fid-1)]
+                x1, y1, x2, y2 = int(xyxy[0]/2), int(xyxy[1]/2), int(xyxy[2]/2), int(xyxy[3]/2)
+                subimg = img[y1:y2, x1:x2]
+
+                subimg_path = os.path.join(subimg_dir, f'x_{CamerType}_tid{int(tid)}_{int(fid)}_{int(bid)}.png' )
+                cv2.imwrite(subimg_path, subimg)
 
     return img_tracking, abimg
@@ -267,9 +269,13 @@ def tracking_simulate(eventpath, savepath):
     #     else:
     #         return
 # =============================================================================
-    bname = os.path.basename(eventpath)
-    idx = bname.find('2024')
-    enent_name = bname[idx:(idx+15)]
+    enent_name = os.path.basename(eventpath)
+
+    ## only to simplify the file name
+    idx = enent_name.find('2024')
+    if idx >= 0:
+        enent_name = enent_name[idx:(idx+15)]
 
     '''2. Read the data in 0/1_track.data in turn and run the simulation'''
     illu_tracking, illu_select = [], []
@@ -308,27 +314,34 @@ def tracking_simulate(eventpath, savepath):
     else:
         Img_t = None
 
-    '''3.1 Additionally save the intact 8-trajectory plot on its own'''
-    basepath, _ = os.path.split(savepath)
-    trajpath = os.path.join(basepath, 'trajs')
-    if not os.path.exists(trajpath):
-        os.makedirs(trajpath)
-    traj_path = os.path.join(trajpath, enent_name+'.png')
-    imgpath_tracking = os.path.join(savepath, enent_name + '_ing.png')
-    imgpath_select = os.path.join(savepath, enent_name + '_slt.png')
-    imgpath_ts = os.path.join(savepath, enent_name + '_ts.png')
+    '''3.1 Save the output trajectory plots; if the tracking and select plots have the same shape, merge them into one image, otherwise save them separately'''
+    imgpath_tracking = os.path.join(savepath, enent_name + '_tracking.png')
+    imgpath_select = os.path.join(savepath, enent_name + '_select.png')
+    imgpath_ts = os.path.join(savepath, enent_name + '_tracking_select.png')
     if Img_t is not None and Img_s is not None and np.all(Img_s.shape==Img_t.shape):
         Img_ts = np.concatenate((Img_t, Img_s), axis = 1)
         H, W = Img_ts.shape[:2]
         cv2.line(Img_ts, (int(W/2), 0), (int(W/2), int(H)), (0, 0, 255), 4)
         cv2.imwrite(imgpath_ts, Img_ts)
-        cv2.imwrite(traj_path, Img_ts)
+    else:
+        if Img_s is not None: cv2.imwrite(imgpath_select, Img_s)     # not expected to be reached
+        if Img_t is not None: cv2.imwrite(imgpath_tracking, Img_t)   # not expected to be reached
+        Img_ts = None
+
+    '''3.2 Additionally save the intact 8-trajectory plot on its own'''
+    if Img_ts is not None:
+        basepath, _ = os.path.split(savepath)
+        trajpath = os.path.join(basepath, 'trajs')
+        if not os.path.exists(trajpath):
+            os.makedirs(trajpath)
+        traj_path = os.path.join(trajpath, enent_name+'.png')
+        cv2.imwrite(traj_path, Img_ts)
+
+    return Img_ts
@@ -336,14 +349,30 @@
 # warnings.simplefilter("error", category=np.VisibleDeprecationWarning)
 def main_loop():
+
     del_barcode_file = r'\\192.168.1.28\share\测试_202406\0723\0723_3\deletedBarcode.txt'
     basepath = r'\\192.168.1.28\share\测试_202406\0723\0723_3'   # test-data folder
-    SavePath = r'D:\contrast\dataset\resultx'   # folder for saving results
-    # prefix = ["getout_", "input_", "error_"]
-
+
+    # del_barcode_file = r'\\192.168.1.28\share\测试_202406\1030\images\returnGoods.txt'
+    # basepath = r'\\192.168.1.28\share\测试_202406\1030\images'   # test-data folder
+
     '''Get the paths of the performance-test data'''
-    relative_paths = contrast_analysis(del_barcode_file, basepath, SavePath)
+    SavePath = r'D:\contrast\dataset\resultx'   # folder for saving results
+    saveimgs = True
+    if os.path.basename(del_barcode_file).find('deletedBarcode') >= 0:
+        relative_paths = get_relative_paths(del_barcode_file, basepath, SavePath, saveimgs)
+    elif os.path.basename(del_barcode_file).find('returnGoods') >= 0:
+        blist = read_returnGoods_file(del_barcode_file)
+        errpairs, corrpairs, err_similarity, correct_similarity = one2n_new(blist)
+        relative_paths = []
+        for getoutevent, inputevent, errevent in errpairs:
+            relative_paths.append(os.path.join(basepath, getoutevent))
+            relative_paths.append(os.path.join(basepath, inputevent))
+            relative_paths.append(os.path.join(basepath, errevent))
+
+    # prefix = ["getout_", "input_", "error_"]
     '''Loop over each test task'''
     k = 0
     for tuple_paths in relative_paths:
@@ -383,12 +412,12 @@ def main():
         SavePath: two directory levels; the first level holds the trajectory images, the second holds the image sequences matching the data files.
     '''
     # eventPaths = r'\\192.168.1.28\share\测试_202406\0723\0723_3'
-    eventPaths = r"D:\DetectTracking\tracking\images"
-
+    eventPaths = r'D:\datasets\ym\exhibition\识别错'   # "识别错" = misrecognized events
     savePath = r'D:\contrast\dataset\result'
+
     k = 0
     for pathname in os.listdir(eventPaths):
-        pathname = "20240925-142635-3e3cb61a-8bbe-45f2-aed7-a40de7f2d624_6924743924161"
+        pathname = "放入薯片识别为辣条"   # folder name: "chips put in, recognized as latiao"
         eventpath = os.path.join(eventPaths, pathname)
         savepath = os.path.join(savePath, pathname)
         if not os.path.exists(savepath):
             os.makedirs(savepath)
         tracking_simulate(eventpath, savepath)
+
         # try:
         #     tracking_simulate(eventpath, savepath)
         # except Exception as e:
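The merged output that tracking_simulate() writes is just the two trajectory plots placed side by side with a red divider; a small helper showing the same composition, with illustrative names:

    import cv2
    import numpy as np

    def stack_tracking_select(img_t, img_s):
        # Requires both plots to have identical shapes, as checked above.
        img_ts = np.concatenate((img_t, img_s), axis=1)
        h, w = img_ts.shape[:2]
        cv2.line(img_ts, (w // 2, 0), (w // 2, h), (0, 0, 255), 4)   # red divider (BGR)
        return img_ts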
"error_"] - + + # del_barcode_file = r'\\192.168.1.28\share\测试_202406\1030\images\returnGoods.txt' + # basepath = r'\\192.168.1.28\share\测试_202406\1030\images' # 测试数据文件夹地址 + '''获取性能测试数据相关路径''' - relative_paths = contrast_analysis(del_barcode_file, basepath, SavePath) + SavePath = r'D:\contrast\dataset\resultx' # 结果保存地址 + saveimgs = True + if os.path.basename(del_barcode_file).find('deletedBarcode'): + relative_paths = get_relative_paths(del_barcode_file, basepath, SavePath, saveimgs) + elif os.path.basename(del_barcode_file).find('returnGoods'): + blist = read_returnGoods_file(del_barcode_file) + errpairs, corrpairs, err_similarity, correct_similarity = one2n_new(blist) + relative_paths = [] + for getoutevent, inputevent, errevent in errpairs: + relative_paths.append(os.path.join(basepath, getoutevent)) + relative_paths.append(os.path.join(basepath, inputevent)) + relative_paths.append(os.path.join(basepath, errevent)) + + # prefix = ["getout_", "input_", "error_"] '''开始循环执行每次测试过任务''' k = 0 for tuple_paths in relative_paths: @@ -383,12 +412,12 @@ def main(): SavePath: 包含二级目录,一级目录为轨迹图像;二级目录为与data文件对应的序列图像存储地址。 ''' # eventPaths = r'\\192.168.1.28\share\测试_202406\0723\0723_3' - eventPaths = r"D:\DetectTracking\tracking\images" - + eventPaths = r'D:\datasets\ym\exhibition\识别错' savePath = r'D:\contrast\dataset\result' + k=0 for pathname in os.listdir(eventPaths): - pathname = "20240925-142635-3e3cb61a-8bbe-45f2-aed7-a40de7f2d624_6924743924161" + pathname = "放入薯片识别为辣条" eventpath = os.path.join(eventPaths, pathname) savepath = os.path.join(savePath, pathname) @@ -396,6 +425,7 @@ def main(): os.makedirs(savepath) tracking_simulate(eventpath, savepath) + # try: # tracking_simulate(eventpath, savepath) # except Exception as e: diff --git a/tracking/tracking_test.py b/tracking/tracking_test.py index 6ea8015..0122d6a 100644 --- a/tracking/tracking_test.py +++ b/tracking/tracking_test.py @@ -85,10 +85,8 @@ def have_tracked(): k = 0 gt = Profile() for filename in os.listdir(trackdict): - # filename = 'test_20240402-173935_6920152400975_back_174037372.pkl' - # filename = '6907149227609_20240508-174733_back_returnGood_70f754088050_425_17327712807.pkl' - # filename = '6907149227609_20240508-174733_front_returnGood_70f754088050_425_17327712807.pkl' - + filename = '153112511_0_seek_105.pkl' + file, ext = os.path.splitext(filename) filepath = os.path.join(trackdict, filename) TracksDict = np.load(filepath, allow_pickle=True) @@ -97,6 +95,9 @@ def have_tracked(): with gt: if filename.find("front") >= 0: vts = doFrontTracks(bboxes, TracksDict) + + Intrude = vts.isintrude() + vts.classify() save_subimgs(vts, file, TracksDict) @@ -113,6 +114,9 @@ def have_tracked(): else: vts = doBackTracks(bboxes, TracksDict) + + Intrude = vts.isintrude() + vts.classify() alltracks.append(vts) @@ -124,9 +128,9 @@ def have_tracked(): cv2.imwrite(str(trackpath), img_tracking) print(file+f" need time: {gt.dt:.2f}s") - # k += 1 - # if k==1: - # break + k += 1 + if k==1: + break if len(alltracks): drawFeatures(alltracks, save_dir) diff --git a/tracking/utils/__pycache__/drawtracks.cpython-39.pyc b/tracking/utils/__pycache__/drawtracks.cpython-39.pyc index 568c628aa989a5ddbb329fe761ad303f4bd43575..d1e5e1e46fce79aa4c628d66ebbb8e88231bc5a4 100644 GIT binary patch delta 1317 zcmZ8hOKcNY6!n{*Cys+iW0STHI7w4cBN3GbOQ7;Gv=9@bic&t(KstRHkDYPGGwz!q zaTl%o+mu#n!xa)Bgjn(sh%zjon|@X-sFYQuP8VIaS+Zf>cb}b)>c!ES^Ugi*-KXn! 
diff --git a/tracking/utils/__pycache__/drawtracks.cpython-39.pyc b/tracking/utils/__pycache__/drawtracks.cpython-39.pyc
index 568c628aa989a5ddbb329fe761ad303f4bd43575..d1e5e1e46fce79aa4c628d66ebbb8e88231bc5a4 100644
GIT binary patch
[binary delta data omitted]
diff --git a/tracking/utils/__pycache__/read_data.cpython-39.pyc b/tracking/utils/__pycache__/read_data.cpython-39.pyc
GIT binary patch
[binary patch data omitted; the small diff for tracking/utils/drawtracks.py is garbled beyond recovery in this span]
diff --git a/tracking/utils/read_data.py b/tracking/utils/read_data.py
--- a/tracking/utils/read_data.py
+++ b/tracking/utils/read_data.py
@@ ... @@ def extract_data(datapath):
                 if len(boxes): bboxes.append(np.array(boxes))
                 if len(feats): ffeats.append(np.array(feats))
-
-            # with warnings.catch_warnings(record=True) as w:
-            #     if len(boxes): bboxes.append(np.array(boxes))
-            #     if len(feats): ffeats.append(np.array(feats))
-            #     if w:
-            #         print(f"caught {len(w)} warnings:")
-            #         for warning in w:
-            #             print(f"warning category: {warning.category}")
-            #             print(f"warning message: {warning.message}")
-            #             print(f"warning location: {warning.filename}:{warning.lineno}")
-
             if len(tboxes):
                 trackerboxes = np.concatenate((trackerboxes, np.array(tboxes)))
             if len(tfeats):
                 trackerfeats = np.concatenate((trackerfeats, np.array(tfeats)))
+
+            timestamp, frameId = [int(ln.split(":")[1]) for ln in line.split(",")[1:]]
+            timestamps.append(timestamp)
+            frameIds.append(frameId)
 
             boxes, feats, tboxes, tfeats = [], [], [], []
@@ -103,6 +101,9 @@ def extract_data(datapath):
     assert(len(trackerboxes)==len(trackerfeats)), "Error at tracker output!"
 
     tracker_feat_dict = {}
+    tracker_feat_dict["timestamps"] = timestamps
+    tracker_feat_dict["frameIds"] = frameIds
+
     for i in range(len(trackerboxes)):
         tid, fid, bid = int(trackerboxes[i, 4]), int(trackerboxes[i, 7]), int(trackerboxes[i, 8])
         if f"frame_{fid}" not in tracker_feat_dict:
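The timestamp/frameId bookkeeping added above assumes the current line is a comma-separated list of key:value fields whose second and third values are integers. A tiny illustration; the field names below are placeholders, not taken from the real .data files:

    # Hypothetical header line with the assumed "key:value,key:value" shape.
    line = "CameraId:1,TimeStamp:1730270812345,frameId:87"

    timestamp, frame_id = [int(field.split(":")[1]) for field in line.split(",")[1:]]
    print(timestamp, frame_id)   # 1730270812345 87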
@@ -169,8 +170,8 @@ def read_tracking_output(filepath):
     return np.array(boxes), np.array(feats)
 
-def read_deletedBarcode_file(filePth):
-    with open(filePth, 'r', encoding='utf-8') as f:
+def read_deletedBarcode_file(filePath):
+    with open(filePath, 'r', encoding='utf-8') as f:
         lines = f.readlines()
 
     split_flag, all_list = False, []
@@ -179,6 +180,9 @@ def read_deletedBarcode_file(filePath):
     clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
 
     for i, line in enumerate(clean_lines):
+        if line.endswith(','):
+            line = line[:-1]
+
         stripped_line = line.strip()
         if not stripped_line:
             if len(barcode_list): dict['barcode'] = barcode_list
@@ -210,11 +214,106 @@ def read_deletedBarcode_file(filePath):
     return all_list
 
+def read_returnGoods_file(filePath):
+    '''
+    Since 2024-10-30 the data previously written to deletedBarcode.txt goes to
+    returnGoods.txt in a new format, so it is parsed differently.
+    '''
+    with open(filePath, 'r', encoding='utf-8') as f:
+        lines = f.readlines()
+    clean_lines = [line.strip().replace("'", '').replace('"', '') for line in lines]
+
+    all_list = []
+    split_flag, dict = False, {}
+    barcode_list, similarity_list = [], []
+    event_list, type_list = [], []
+
+    for i, line in enumerate(clean_lines):
+        if line.endswith(','):
+            line = line[:-1]
+        stripped_line = line.strip()
+
+        if not stripped_line:
+            if len(barcode_list): dict['barcode'] = barcode_list
+            if len(similarity_list): dict['similarity'] = similarity_list
+            if len(event_list): dict['event'] = event_list
+            if len(type_list): dict['type'] = type_list
+
+            if len(dict) and dict['SeqDir'].find('*') < 0:
+                all_list.append(dict)
+
+            split_flag, dict = False, {}
+            barcode_list, similarity_list = [], []
+            event_list, type_list = [], []
+            continue
+
+        if line.find(':') < 0: continue
+        if line.find('1:n') == 0: continue
+
+        label = line.split(':')[0].strip()
+        value = line.split(':')[1].strip()
+
+        if label == 'SeqDir':
+            dict['SeqDir'] = value
+            dict['Deleted'] = value.split('_')[-1]
+        if label == 'List':
+            split_flag = True
+            continue
+        if split_flag:
+            event_list.append(label)
+            barcode_list.append(label.split('_')[-1])
+            similarity_list.append(value.split(',')[0])
+            type_list.append(value.split('=')[-1])
+
+    if len(barcode_list): dict['barcode'] = barcode_list
+    if len(similarity_list): dict['similarity'] = similarity_list
+    if len(event_list): dict['event'] = event_list
+    if len(type_list): dict['type'] = type_list
+    if len(dict) and dict['SeqDir'].find('*') < 0:
+        all_list.append(dict)
+
+    return all_list
+
+def read_seneor(filepath):
+    WeightDict = OrderedDict()
+    with open(filepath, 'r', encoding='utf-8') as f:
+        lines = f.readlines()
+    for i, line in enumerate(lines):
+        line = line.strip()
+
+        keyword = line.split(':')[0]
+        value = line.split(':')[1]
+
+        vdata = [float(s) for s in value.split(',') if len(s)]
+
+        WeightDict[keyword] = vdata[-1]
+
+    return WeightDict
+
 def read_weight_timeConsuming(filePth):
     WeightDict, SensorDict, ProcessTimeDict = OrderedDict(), OrderedDict(), OrderedDict()
 
     with open(filePth, 'r', encoding='utf-8') as f:
         lines = f.readlines()
+    # label = ''
     for i, line in enumerate(lines):
         line = line.strip()
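Downstream, the parsed returnGoods list feeds the 1:n matcher, as main_loop() in tracking/module_analysis.py does; a condensed usage sketch, with a placeholder path:

    from tracking.utils.read_data import read_returnGoods_file
    from contrast.one2n_contrast import one2n_new

    blist = read_returnGoods_file(r'D:\data\returnGoods.txt')   # placeholder path
    errpairs, corrpairs, err_similarity, correct_similarity = one2n_new(blist)
    for getoutevent, inputevent, errevent in errpairs:
        print(getoutevent, inputevent, errevent)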
diff --git a/utils/__pycache__/dataloaders.cpython-39.pyc b/utils/__pycache__/dataloaders.cpython-39.pyc
index 95b4e573f97f690d064ba4e26f54ee36ad0f10bf..c77703a2a567690df93ad96b03849d7c5af821f1 100644
GIT binary patch
[binary delta data omitted]
diff --git a/utils/__pycache__/getsource.cpython-39.pyc b/utils/__pycache__/getsource.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2253c09642764b26383502212488666406d76a78
GIT binary patch
[literal 1740 binary data omitted]
literal 0
HcmV?d00001

diff --git a/utils/dataloaders.py b/utils/dataloaders.py
index 26201c3..4ef1874 100644
--- a/utils/dataloaders.py
+++ b/utils/dataloaders.py
@@ -257,6 +257,8 @@ class LoadImages:
         videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
         ni, nv = len(images), len(videos)
 
+
+
         self.img_size = img_size
         self.stride = stride
         self.files = images + videos
diff --git a/utils/getsource.py b/utils/getsource.py
new file mode 100644
index 0000000..6c918c1
--- /dev/null
+++ b/utils/getsource.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Oct 30 13:18:59 2024
+
+@author: ym
+"""
+
+import os
+import glob
+
+IMGFORMATS = '.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff', '.webp', '.pfm'
+VIDFORMATS = '.avi', '.gif', '.m4v', '.mkv', '.mov', '.mp4', '.ts', '.wmv'
+
+def get_image_pairs(p):
+    files = []
+    files.extend(sorted(glob.glob(os.path.join(p, '*.jpg'))))
+    # images = [x for x in files if x.split('.')[-1].lower() in IMGFORMATS]
+
+    tamps_0, tamps_1 = [], []
+    files_0, files_1 = [], []
+    for file in files:
+        basename = os.path.basename(file)
+        if basename.find('frameId') < 0: continue
+
+        f, ext = os.path.splitext(basename)
+        camer, tamp, _, frameId = f.split('_')
+
+        if camer == '0':
+            tamps_0.append(int(tamp))
+            files_0.append(file)
+
+        if camer == '1':
+            tamps_1.append(int(tamp))
+            files_1.append(file)
+
+    # sort each camera's files by timestamp
+    idx0 = sorted(range(len(tamps_0)), key=lambda k: tamps_0[k])
+    files0 = [files_0[i] for i in idx0]
+
+    idx1 = sorted(range(len(tamps_1)), key=lambda k: tamps_1[k])
+    files1 = [files_1[i] for i in idx1]
+
+    files = (files0, files1)
+
+    return files
+
+
+def get_video_pairs(vpath):
+    videopaths = []
+    for filename in os.listdir(vpath):
+        file, ext = os.path.splitext(filename)
+        if ext in VIDFORMATS:
+            videopaths.append(os.path.join(vpath, filename))
+    return videopaths
diff --git a/说明文档.txt b/说明文档.txt
index e9f2775..90b9d34 100644
--- a/说明文档.txt
+++ b/说明文档.txt
@@ -1,4 +1,4 @@
-Three functional modules
+Five functional modules
 
 1. Yolo + Tracker + Resnet, where Resnet is implemented in ./contrast
     track_reid.py
@@ -12,7 +12,8 @@
 3. Contrast-analysis module, directory: ./contrast
     two scenarios: 1:1 and 1:n
     1:1 scenario:
-    (1) OneToOneCompare.txt
+    (1) comparison based on data saved on site
+        OneToOneCompare.txt (data saved on site)
         one2one_onsite.py
     (2) feature extraction with the local algorithm
         one2one_contrast.py
@@ -23,6 +24,29 @@
     feat_select.py
 
+4. Whole-pipeline simulation
+    pipeline.py
+    SourceType: "image" or "video", the input data type of the yolo+resnet+tracker module
+
+5. Fully real-time time-slicing simulation and analysis
+    time_devide.py
+    the module has to be run in 2 steps:
+    (1) runyolo()
+        calls run_yolo in imgs_inference.py
+        follow-up work:
+        1). merge run_yolo with track_reid.yolo_resnet_tracker
+        2). standardize the image file names
+    (2) show_seri()
+        calls devide_motion_state in event_time_specify.py
+
 Concrete implementation:
 ./tracking
     tracking_test.py
@@ -50,6 +74,7 @@
     (b) local tracking output;
     (c) trajectories before the on-site algorithm's trajectory selection;
     (d) trajectories after the on-site algorithm's trajectory selection
+
     do_tracking(fpath, savedir, event_name)
@@ -57,10 +82,10 @@
     inputs:
         fpath: the 0/1_track.data file; also checks that 0/1_tracking_output.data exists, returning None, None right away if it does not
         savedir: 3 subfolders and one png trajectory plot are created under it
-            ./savedir/event_name
-            ./savedir/event_name_subimgs
-            ./savedir/trajectory
-            ./savedir/event_name_ts.png
+            ./savedir/event_name_images, the image sequence with boxes drawn
+            ./savedir/event_name_subimgs, target sub-images after trajectory analysis; no _x suffix: PC results, _x suffix: output of single-camera trajectory selection on the tablet
+            ./savedir/trajectory, stores every trajectory plot
+            ./savedir/event_name_tracking_select.png, or event_name_tracking.png, or event_name_select.png
     outputs:
         img_tracking: comparison plot of the local tracker and tracking outputs
@@ -126,4 +151,27 @@
 main():
     loops over deletedBarcode.txt files in different folders and merges the evaluation.
 main1():
-    1:n performance evaluation on a given deletedBarcode.txt
\ No newline at end of file
+    1:n performance evaluation on a given deletedBarcode.txt
+
+    one2one_onsite.py
+        1:1 performance evaluation of the data produced by on-site trials;
+        for the data-saving format used before 2024-10; relies on OneToOneCompare.txt
+
+    standard feature-vector generation
+        std_sample_path: where the image samples are stored
+        std_barcode_path: walks the file list under std_sample_path, builds a {barcode: image sample path} dictionary and stores it
+        std_feature_path: calls inference_image() and, for every barcode, builds a feature dictionary and stores it
+
+    genfeats.py
+        genfeatures(imgpath, bcdpath, featpath)
+        purpose: generate standard feature vectors
+        arguments:
+        (1) imgpath: where the image samples are stored
+        (2) bcdpath: walks the file list under imgpath, builds a {barcode: image sample path} dictionary and stores it
+        (3) featpath: calls inference_image() and, for every barcode, builds a feature dictionary and stores it
\ No newline at end of file
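To go with the notes on genfeats.py, a minimal invocation under the signature documented above; all three paths are placeholders:

    from contrast.genfeats import genfeatures

    imgpath  = r'D:\contrast\std_samples'    # barcode image samples
    bcdpath  = r'D:\contrast\std_barcodes'   # stored {barcode: image sample path} dicts
    featpath = r'D:\contrast\std_features'   # per-barcode feature dicts via inference_image()

    genfeatures(imgpath, bcdpath, featpath)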