initial project version!

This commit is contained in:
王庆刚
2024-05-20 20:01:06 +08:00
commit d6f3693d3f
483 changed files with 60345 additions and 0 deletions

tracking/__init__.py Normal file


Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -0,0 +1,466 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 18:16:01 2024
@author: ym
"""
import numpy as np
import cv2
from pathlib import Path
from scipy.spatial.distance import cdist
from utils.mergetrack import track_equal_track
curpath = Path(__file__).resolve().parents[0]
class MoveState:
"""商品运动状态标志"""
Static = 0
DownWard = 1
UpWard = 2
FreeMove = 3
Unknown = -1
class ShoppingCart:
def __init__(self, bboxes):
self.bboxes = bboxes
self.loadrate = self.load_rate()
def load_rate(self):
bboxes = self.bboxes
fid = min(bboxes[:, 7])
idx = bboxes[:, 7] == fid
boxes = bboxes[idx]
temp = np.zeros(self.incart.shape, np.uint8)
for i in range(boxes.shape[0]):
x1, y1, x2, y2, tid = boxes[i, 0:5]
cv2.rectangle(temp, (int(x1), int(y1)), (int(x2), int(y2)), 255, cv2.FILLED)
'''1. AND: filter out interference outside the cart border'''
loadstate = cv2.bitwise_and(self.incart, temp)
'''2. XOR: get the filled region inside the cart'''
# loadstate = cv2.bitwise_xor(self.incart, temp1)
num_loadstate = cv2.countNonZero(loadstate)
num_incart = cv2.countNonZero(self.incart)
loadrate = num_loadstate / (num_incart+0.01)
# edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png", cv2.IMREAD_GRAYSCALE)
# cv2.imwrite(f"./test/temp.png", cv2.add(temp, edgeline))
# cv2.imwrite(f"./test/incart.png", cv2.add(self.incart, edgeline))
# cv2.imwrite(f"./test/loadstate.png", cv2.add(loadstate, edgeline))
return loadrate
@property
def incart(self):
img = cv2.imread(str(curpath/'cart_tempt/back_incart.png'), cv2.IMREAD_GRAYSCALE)
ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY)
return binary
@property
def outcart(self):
img = cv2.imread(str(curpath/'cart_tempt/back_outcart.png'), cv2.IMREAD_GRAYSCALE)
ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY)
return binary
@property
def cartedge(self):
img = cv2.imread(str(curpath/'cart_tempt/back_cartedge.png'), cv2.IMREAD_GRAYSCALE)
ret, binary = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY)
return binary
class Track:
'''Abstract base class; not intended to be instantiated directly'''
def __init__(self, boxes, imgshape=(1024, 1280)):
'''
boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
0 1 2 3 4 5 6 7 8
'''
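# An illustrative row with hypothetical values, matching the layout above:
# [120., 260., 380., 540., 3., 0.92, 8., 15., 2.]
# i.e. a box of track_id 3, class 8, score 0.92, in frame 15, with box index 2.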
# What happens if the following conditions are not met?
assert len(set(boxes[:, 4].astype(int))) == 1, "A Track must contain exactly one track_id"
assert len(set(boxes[:, 6].astype(int))) == 1, "A Track must contain exactly one class"
self.boxes = boxes
self.tid = int(boxes[0, 4])
self.cls = int(boxes[0, 6])
self.frnum = boxes.shape[0]
self.imgBorder = False
self.imgshape = imgshape
self.state = MoveState.Unknown
'''start / end frame IDs of the trajectory'''
self.start_fid = int(np.min(boxes[:, 7]))
self.end_fid = int(np.max(boxes[:, 7]))
'''coordinates of the 5 key points (center, top-left, top-right, bottom-left, bottom-right)'''
self.compute_cornpoints()
'''trajectory features of the 5 key points (could be moved into the subclasses to reduce the cost of sequential processing)'''
self.compute_cornpts_feats()
mw, mh = np.mean(boxes[:, 2]-boxes[:, 0]), np.mean((boxes[:, 3]-boxes[:, 1]))
self.mwh = np.mean((mw, mh))
self.Area = mw * mh
'''
Displacement between the last frame and the first frame:
vshift: positive means moving downward, negative means moving upward
hshift: negative means moving toward the cart's side borders, positive means moving toward the center
'''
self.vshift = self.cornpoints[-1, 1] - self.cornpoints[0, 1] # vertical displacement
self.hshift = abs(self.cornpoints[0, 0] - self.imgshape[0]/2) - \
abs(self.cornpoints[-1, 0] - self.imgshape[0]/2)
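# Sign-convention sketch with hypothetical numbers (imgshape[0] = 1024, so the centre column is x = 512):
# centre x moving 100 -> 400 gives hshift = |100-512| - |400-512| = 412 - 112 = 300 > 0 (toward the centre),
# while 400 -> 100 gives hshift = -300 < 0 (toward the cart border).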
'''hand-state analysis'''
self.HAND_STATIC_THRESH = 100
if self.cls == 0:
self.extract_hand_features()
def compute_cornpoints(self):
'''
cornpoints has 10 columns: the (x, y) coordinates of the 5 key points
(center, top_left, top_right, bottom_left, bottom_right)
'''
boxes = self.boxes
cornpoints = np.zeros((self.frnum, 10))
cornpoints[:,0] = (boxes[:, 0] + boxes[:, 2]) / 2
cornpoints[:,1] = (boxes[:, 1] + boxes[:, 3]) / 2
cornpoints[:,2], cornpoints[:,3] = boxes[:, 0], boxes[:, 1]
cornpoints[:,4], cornpoints[:,5] = boxes[:, 2], boxes[:, 1]
cornpoints[:,6], cornpoints[:,7] = boxes[:, 0], boxes[:, 3]
cornpoints[:,8], cornpoints[:,9] = boxes[:, 2], boxes[:, 3]
self.cornpoints = cornpoints
def compute_cornpts_feats(self):
'''
Per-key-point trajectory features: step distances, total length,
maximum pairwise distance, and minimum-area bounding rectangle.
'''
trajectory = []
trajlens = []
trajdist = []
trajrects = []
for k in range(5):
# diff_xy2 = np.power(np.diff(self.cornpoints[:, 2*k:2*(k+1)], axis = 0), 2)
# trajlen = np.sum(np.sqrt(np.sum(diff_xy2, axis = 1)))
X = self.cornpoints[:, 2*k:2*(k+1)]
traj = np.linalg.norm(np.diff(X, axis=0), axis=1)
trajectory.append(traj)
trajlen = np.sum(traj)
trajlens.append(trajlen)
ptdist = np.max(cdist(X, X))
trajdist.append(ptdist)
'''minimum-area bounding rectangle:
rect[0]: center (x, y)
rect[1]: (w, h)
rect[2]: rotation angle in (-90°, 0]
'''
rect = cv2.minAreaRect(X.astype(np.int64))
trajrects.append(rect)
self.trajectory = trajectory
self.trajlens = trajlens
self.trajdist = trajdist
self.trajrects = trajrects
def trajfeature(self):
'''
Compute trajectory features for two cases (boxes away from vs. touching the image border):
- trajmin: the key-point trajectory with the minimum length
- trajlen_min: the minimum trajectory length
- trajdist_min: the minimum trajectory Euclidean distance
'''
idx1 = self.trajlens.index(max(self.trajlens))
trajmax = self.trajectory[idx1]
trajlen_max = self.trajlens[idx1]
trajdist_max = self.trajdist[idx1]
if not self.isCornpoint:
idx2 = self.trajlens.index(min(self.trajlens))
trajmin = self.trajectory[idx2]
trajlen_min = self.trajlens[idx2]
trajdist_min = self.trajdist[idx2]
else:
trajmin = self.trajectory[0]
trajlen_min = self.trajlens[0]
trajdist_min = self.trajdist[0]
'''min trajectory length / max trajectory length: the smaller it is, the smaller the motion amplitude'''
trajlen_rate = trajlen_min/(trajlen_max+0.0001)
'''min trajectory Euclidean distance / mean box scale'''
trajdist_rate = trajdist_min/(self.mwh+0.0001)
self.trajmin = trajmin
self.trajmax = trajmax
self.feature = [trajlen_min, trajlen_max,
trajdist_min, trajdist_max,
trajlen_rate, trajdist_rate]
def compute_static_fids(self, det_y, STATIC_THRESH = 8):
'''
For the front camera, y is usually the box's y1 coordinate, and the goods should be restricted to inside the cart.
inputs:
det_y: 1D array of frame-to-frame differences
parameters:
STATIC_THRESH: threshold below which a difference counts as static
outputs:
ranges: list of (start, end) index pairs where |det_y| < STATIC_THRESH (static segments)
rangex: list of (start, end) index pairs for the remaining (moving) segments
'''
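# Worked example with a hypothetical input and STATIC_THRESH = 8:
# det_y = [1, 2, 20, 1, 1] -> static indices [0, 1, 3, 4]
# ranges = [(0, 2), (3, 5)] # static segments
# rangex = [(2, 3)] # the moving gap between them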
# print(f"The ID is: {self.tid}")
# det_y = np.diff(y, axis=0)
ranges, rangex = [], []
static_indices = np.where(np.abs(det_y) < STATIC_THRESH)[0]
if len(static_indices) == 0:
rangex.append((0, len(det_y)))
return ranges, rangex
start_index = static_indices[0]
for i in range(1, len(static_indices)):
if static_indices[i] != static_indices[i-1] + 1:
ranges.append((start_index, static_indices[i-1] + 1))
start_index = static_indices[i]
ranges.append((start_index, static_indices[-1] + 1))
if len(ranges) == 0:
rangex.append((0, len(det_y)))
return ranges, rangex
idx1, idx2 = ranges[0][0], ranges[-1][1]
if idx1 != 0:
rangex.append((0, idx1))
# the final stage of the trajectory is in motion
for k in range(1, len(ranges)):
index1 = ranges[k-1][1]
index2 = ranges[k][0]
rangex.append((index1, index2))
if idx2 != len(det_y):
rangex.append((idx2, len(det_y)))
return ranges, rangex
def extract_hand_features(self):
assert self.cls == 0, "The class of traj must be HAND!"
self.isHandStatic = False
x0 = (self.boxes[:, 0] + self.boxes[:, 2]) / 2
y0 = (self.boxes[:, 1] + self.boxes[:, 3]) / 2
handXY = np.stack((x0, y0), axis=-1)
# handMaxY0 = np.max(y0)
handCenter = np.array([(max(x0)+min(x0))/2, (max(y0)+min(y0))/2])
handMaxDist = np.max(np.linalg.norm(handXY - handCenter))
if handMaxDist < self.HAND_STATIC_THRESH:
self.isHandStatic = True
return
class doTracks:
def __init__(self, bboxes, TracksDict):
'''fundamental property'''
self.bboxes = bboxes
self.TracksDict = TracksDict
self.frameID = np.unique(bboxes[:, 7].astype(int))
self.trackID = np.unique(bboxes[:, 4].astype(int))
self.lboxes = self.array2list()
'''Classify the elements of self.tracks and put each track into the corresponding list'''
self.Hands = []
self.Kids = []
self.Static = []
self.Residual = []
self.DownWard = [] # subset of self.Residual
self.UpWard = [] # subset of self.Residual
self.FreeMove = [] # subset of self.Residual
def array2list(self):
'''
Convert bboxes into a list of per-track boxes.
bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
Return
lboxes: a list whose elements are the boxes sharing one track_id, each row in the format
[x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
'''
track_ids = self.bboxes[:, 4].astype(int)
lboxes = []
for t_id in self.trackID:
# print(f"The ID is: {t_id}")
idx = np.where(track_ids == t_id)[0]
box = self.bboxes[idx, :]
lboxes.append(box)
return lboxes
def classify(self):
tracks = self.tracks
# extract hand frame_ids and associate them with the frame_ids of moving targets
hand_tracks = [t for t in tracks if t.cls==0]
self.Hands.extend(hand_tracks)
tracks = self.sub_tracks(tracks, hand_tracks)
# extract kid tracks and compute their state: left, right, incart
kid_tracks = [t for t in tracks if t.cls==9]
kid_states = [self.kid_state(t) for t in kid_tracks]
self.Kids = [x for x in zip(kid_tracks, kid_states)]
tracks = self.sub_tracks(tracks, kid_tracks)
static_tracks = [t for t in tracks if t.frnum>1 and t.is_static()]
self.Static.extend(static_tracks)
'''tracks left after removing static targets'''
tracks = self.sub_tracks(tracks, static_tracks)
return tracks
def similarity(self):
nt = len(self.tracks)
similar_dict = {}
if nt >= 2:
for i in range(nt):
for j in range(i, nt):
tracka = self.tracks[i]
trackb = self.tracks[j]
similar = self.feat_similarity(tracka, trackb)
similar_dict.update({(tracka.tid, trackb.tid): similar})
return similar_dict
def feat_similarity(self, tracka, trackb, metric='cosine'):
boxes_a, boxes_b = tracka.boxes, trackb.boxes
na, nb = tracka.boxes.shape[0], trackb.boxes.shape[0]
feata, featb = [], []
for i in range(na):
fid, bid = tracka.boxes[i, 7:9].astype(int)
feata.append(self.TracksDict[fid][bid])
for i in range(nb):
fid, bid = trackb.boxes[i, 7:9].astype(int)
featb.append(self.TracksDict[fid][bid])
feata = np.asarray(feata, dtype=np.float32)
featb = np.asarray(featb, dtype=np.float32)
similarity_matrix = 1-np.maximum(0.0, cdist(feata, featb, metric))
feata_m = np.mean(feata, axis =0)[None,:]
featb_m = np.mean(featb, axis =0)[None,:]
simi_ab = 1 - cdist(feata_m, featb_m, metric)
print(f'tid {int(boxes_a[0, 4])} vs {int(boxes_b[0, 4])}: {simi_ab[0][0]}')
# return np.max(similarity_matrix)
return simi_ab
def merge_tracks_loop(self, alist):
na, nb = len(alist), 0
while na!=nb:
na = len(alist)
alist = self.merge_tracks(alist) #func is from subclass
nb = len(alist)
return alist
def base_merge_tracks(self, Residual):
"""
对不同id但可能是同一商品的目标进行归并
"""
mergedTracks = []
alist = [t for t in Residual]
while alist:
atrack = alist[0]
cur_list = []
cur_list.append(atrack)
alist.pop(0)
blist = [b for b in alist]
alist = []
for btrack in blist:
if track_equal_track(atrack, btrack, self.TracksDict):
cur_list.append(btrack)
else:
alist.append(btrack)
mergedTracks.append(cur_list)
return mergedTracks
@staticmethod
def join_tracks(tlista, tlistb):
"""Combine two lists of stracks into a single one."""
exists = {}
res = []
for t in tlista:
exists[t.tid] = 1
res.append(t)
for t in tlistb:
tid = t.tid
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
@staticmethod
def sub_tracks(tlista, tlistb):
track_ids_b = {t.tid for t in tlistb}
return [t for t in tlista if t.tid not in track_ids_b]


@@ -0,0 +1,179 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 18:36:31 2024
@author: ym
"""
import numpy as np
from utils.mergetrack import track_equal_track
from scipy.spatial.distance import cdist
from .dotracks import doTracks, ShoppingCart
from .track_back import backTrack
class doBackTracks(doTracks):
def __init__(self, bboxes, TracksDict):
super().__init__(bboxes, TracksDict)
self.tracks = [backTrack(b) for b in self.lboxes]
# self.similar_dict = self.similarity()
self.shopcart = ShoppingCart(bboxes)
# =============================================================================
# def array2list(self):
# ''' 0, 1, 2, 3, 4, 5, 6, 7, 8
# bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# lboxes[x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# '''
#
# track_ids = set(self.bboxes[:, 4])
# lboxes = []
# for t_id in track_ids:
# idx = np.where(self.bboxes[:, 4] == t_id)[0]
# box = self.bboxes[idx, :]
#
# x = (box[:, 0] + box[:, 2]) / 2
# y = (box[:, 1] + box[:, 3]) / 2
#
# # box: [x, y, w, h, track_id, score, cls, frame_index]
# box[:, 2] = box[:, 2] - box[:, 0]
# box[:, 3] = box[:, 3] - box[:, 1]
# box[:, 0] = x
# box[:, 1] = y
#
# lboxes.append(box)
#
#
# return lboxes
# =============================================================================
def classify(self):
'''
Classify the elements of tracks.
'''
tracks = super().classify()
# tracks = self.tracks
# shopcart = self.shopcart
# # extract hand frame_ids and associate them with the frame_ids of moving targets
# hand_tracks = [t for t in tracks if t.cls==0]
# self.Hands.extend(hand_tracks)
# tracks = self.sub_tracks(tracks, hand_tracks)
# # extract kid tracks and compute their state: left, right, incart
# kid_tracks = [t for t in tracks if t.cls==9]
# kid_states = [self.kid_state(t) for t in kid_tracks]
# self.Kids = [x for x in zip(kid_tracks, kid_states)]
# tracks = self.sub_tracks(tracks, kid_tracks)
# static_tracks = [t for t in tracks if t.frnum>1 and t.is_static()]
# self.Static.extend(static_tracks)
# '''tracks left after removing static targets'''
# tracks = self.sub_tracks(tracks, static_tracks)
'''moving interference targets outside the cart border'''
out_track = [t for t in tracks if t.is_OutTrack()]
tracks = self.sub_tracks(tracks, out_track)
'''iteratively merge trajectories'''
# merged_tracks = self.merge_tracks(tracks)
merged_tracks = self.merge_tracks_loop(tracks)
tracks = [t for t in merged_tracks if t.frnum > 1]
self.Residual = tracks
def merge_tracks(self, Residual):
"""
对不同id但可能是同一商品的目标进行归并
"""
mergedTracks = self.base_merge_tracks(Residual)
oldtracks, newtracks = [], []
for tracklist in mergedTracks:
if len(tracklist) > 1:
boxes = np.empty((0, 9), dtype=np.float32)
for i, track in enumerate(tracklist):
if i==0: ntid, ncls=track.boxes[0, 4], track.boxes[0, 6]
iboxes = track.boxes.copy()
iboxes[:, 4], iboxes[:, 6] = ntid, ncls
boxes = np.concatenate((boxes, iboxes), axis=0)
oldtracks.append(track)
fid_indices = np.argsort(boxes[:, 7])
boxes_fid = boxes[fid_indices]
newtracks.append(backTrack(boxes_fid))
elif len(tracklist) == 1:
oldtracks.append(tracklist[0])
newtracks.append(tracklist[0])
redu = self.sub_tracks(Residual, oldtracks)
merged = self.join_tracks(redu, newtracks)
return merged
def kid_state(self, track):
left_dist = track.cornpoints[:, 2]
right_dist = 1024 - track.cornpoints[:, 4]
if np.sum(left_dist<30)/track.frnum>0.8 and np.sum(right_dist>512)/track.frnum>0.7:
kidstate = "left"
elif np.sum(left_dist>512)/track.frnum>0.7 and np.sum(right_dist<30)/track.frnum>0.8:
kidstate = "right"
else:
kidstate = "incart"
return kidstate
def is_associate_with_hand(self):
"""
分析商品和手之间的关联性
"""
pass
def isuptrack(self, track):
Flag = False
return Flag
def isdowntrack(self, track):
Flag = False
return Flag
def isfreetrack(self, track):
Flag = False
return Flag


@@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 18:38:20 2024
@author: ym
"""
import numpy as np
from utils.mergetrack import track_equal_track
from .dotracks import doTracks
from .track_front import frontTrack
class doFrontTracks(doTracks):
def __init__(self, bboxes, TracksDict):
super().__init__(bboxes, TracksDict)
self.tracks = [frontTrack(b) for b in self.lboxes]
def classify(self):
'''Classify the elements of tracks.'''
tracks = self.tracks
'''extract hand tracks'''
hand_tracks = [t for t in tracks if t.cls==0]
self.Hands.extend(hand_tracks)
tracks = self.sub_tracks(tracks, hand_tracks)
'''extract kid tracks'''
kid_tracks = [t for t in tracks if t.cls==9]
tracks = self.sub_tracks(tracks, kid_tracks)
'''static tracks'''
static_tracks = [t for t in tracks if t.frnum>1 and t.is_static()]
'''tracks left after removing static targets'''
tracks = self.sub_tracks(tracks, static_tracks)
'''iteratively merge trajectories'''
merged_tracks = self.merge_tracks_loop(tracks)
tracks = [t for t in merged_tracks if t.frnum > 1]
for gtrack in tracks:
# print(f"Goods ID:{gtrack.tid}")
for htrack in hand_tracks:
if self.is_associate_with_hand(htrack, gtrack):
gtrack.hands.append(htrack)
freemoved_tracks = [t for t in tracks if t.is_free_move()]
tracks = self.sub_tracks(tracks, freemoved_tracks)
self.Residual = tracks
def is_associate_with_hand(self, htrack, gtrack):
'''Criteria for associating a hand track with a goods track:
a. their moving-frame index sets intersect
b. in the intersecting frames the IoUs are all greater than 0
'''
assert htrack.cls==0 and gtrack.cls!=0 and gtrack.cls!=9, 'Track cls is wrong!'
hboxes = np.empty(shape=(0, 9), dtype=np.float32)
gboxes = np.empty(shape=(0, 9), dtype=np.float32)
# start, end are indices, so slicing needs start:(end+1)
for start, end in htrack.dynamic_y2:
hboxes = np.concatenate((hboxes, htrack.boxes[start:end+1, :]), axis=0)
for start, end in gtrack.dynamic_y1:
gboxes = np.concatenate((gboxes, gtrack.boxes[start:end+1, :]), axis=0)
hfids, gfids = hboxes[:, 7], gboxes[:, 7]
fids = set(hfids).intersection(set(gfids))
if len(fids)==0:
return False
# print(f"Goods ID: {gtrack.tid}, Hand ID: {htrack.tid}")
ious = []
for f in fids:
h = np.where(hfids==f)[0][0]
g = np.where(gfids==f)[0][0]
x11, y11, x12, y12 = hboxes[h, 0:4]
x21, y21, x22, y22 = gboxes[g, 0:4]
x1, y1 = max((x11, x21)), max((y11, y21))
x2, y2 = min((x12, x22)), min((y12, y22))
inter = (x2 - x1).clip(0) * (y2 - y1).clip(0)
area1 = (x12 - x11) * (y12 - y11)
area2 = (x22 - x21) * (y22 - y21)
iou = inter / (area1 + area2 - inter + 1e-6)
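# IoU sanity check with hypothetical boxes (0, 0, 10, 10) and (5, 5, 15, 15):
# inter = 5*5 = 25, area1 = area2 = 100, iou = 25/(100+100-25) ≈ 0.14 > 0, so the pair counts as associated.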
if iou>0:
ious.append(iou)
return len(ious)
def merge_tracks(self, Residual):
"""
对不同id但可能是同一商品的目标进行归并
"""
# =============================================================================
# mergedTracks = []
# alist = [t for t in Residual]
# while alist:
# atrack = alist[0]
# cur_list = []
# cur_list.append(atrack)
# alist.pop(0)
#
# blist = [b for b in alist]
# alist = []
# for btrack in blist:
# if track_equal_track(atrack, btrack, self.TracksDict):
# cur_list.append(btrack)
# else:
# alist.append(btrack)
#
# mergedTracks.append(cur_list)
# =============================================================================
mergedTracks = self.base_merge_tracks(Residual)
oldtracks, newtracks = [], []
for tracklist in mergedTracks:
if len(tracklist) > 1:
boxes = np.empty((0, 9), dtype=np.float32)
for i, track in enumerate(tracklist):
if i==0: ntid, ncls=track.boxes[0, 4], track.boxes[0, 6]
iboxes = track.boxes.copy()
iboxes[:, 4], iboxes[:, 6] = ntid, ncls
boxes = np.concatenate((boxes, iboxes), axis=0)
oldtracks.append(track)
fid_indices = np.argsort(boxes[:, 7])
boxes_fid = boxes[fid_indices]
newtracks.append(frontTrack(boxes_fid))
elif len(tracklist) == 1:
oldtracks.append(tracklist[0])
newtracks.append(tracklist[0])
redu = self.sub_tracks(Residual, oldtracks)
merged = self.join_tracks(redu, newtracks)
return merged
# =============================================================================
# def array2list(self):
# '''
# 将 bboxes 变换为 track 列表
# bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# Return
# lboxes列表列表中元素具有同一 track_idx1y1x2y2 格式
# [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# '''
# track_ids = set(self.bboxes[:, 4])
# lboxes = []
# for t_id in track_ids:
# # print(f"The ID is: {t_id}")
# idx = np.where(self.bboxes[:, 4] == t_id)[0]
# box = self.bboxes[idx, :]
#
# lboxes.append(box)
#
# return lboxes
# =============================================================================


@@ -0,0 +1,329 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 18:28:47 2024
@author: ym
"""
import cv2
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.decomposition import PCA
from .dotracks import MoveState, Track
class backTrack(Track):
# boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# 0, 1, 2, 3, 4, 5, 6, 7, 8
def __init__(self, boxes, imgshape=(1024, 1280)):
super().__init__(boxes, imgshape)
'''depends on self.cornpoints'''
self.isCornpoint = self.isimgborder()
'''depends on self.isCornpoint, so it cannot be initialized in the parent class'''
self.trajfeature()
'''frame indices of static points'''
self.static_index = self.compute_static_fids()
'''frame indices of moving points (the static frame indices at both ends of a moving segment)'''
self.moving_index = self.compute_dynamic_fids()
self.static_dynamic_fids = self.compute_static_dynamic_fids()
'''depends on self.cornpoints; defines the goods-position variables:
self.Cent_isIncart, self.LB_isIncart, self.RB_isIncart
self.posState = self.Cent_isIncart+self.LB_isIncart+self.RB_isIncart'''
self.PositionState()
'''self.feature_ious = (incart_iou, outcart_iou, cartboarder_iou, maxbox_iou, minbox_iou)
self.incartrates = incartrates'''
self.compute_ious_feat()
# self.PCA()
def isimgborder(self, BoundPixel=10, BoundThresh=0.3):
x1, y1 = self.cornpoints[:,2], self.cornpoints[:,3],
x2, y2 = self.cornpoints[:,8], self.cornpoints[:,9]
cont1 = sum(abs(x1)<BoundPixel) / self.frnum > BoundThresh
cont2 = sum(abs(y1)<BoundPixel) / self.frnum > BoundThresh
cont3 = sum(abs(x2-self.imgshape[0])<BoundPixel) / self.frnum > BoundThresh
cont4 = sum(abs(y2-self.imgshape[1])<BoundPixel) / self.frnum > BoundThresh
cont = cont1 or cont2 or cont3 or cont4
isCornpoint = False
if cont:
isCornpoint = True
return isCornpoint
def PositionState(self, camerType="back"):
'''
camerType: "back" for the rear camera,
"front" for the front camera
'''
if camerType=="front":
incart = cv2.imread("./shopcart/cart_tempt/incart.png", cv2.IMREAD_GRAYSCALE)
else:
incart = cv2.imread("./shopcart/cart_tempt/incart_ftmp.png", cv2.IMREAD_GRAYSCALE)
xc, yc = self.cornpoints[:,0].clip(0,self.imgshape[0]-1).astype(np.int64), self.cornpoints[:,1].clip(0,self.imgshape[1]-1).astype(np.int64)
x1, y1 = self.cornpoints[:,6].clip(0,self.imgshape[0]-1).astype(np.int64), self.cornpoints[:,7].clip(0,self.imgshape[1]-1).astype(np.int64)
x2, y2 = self.cornpoints[:,8].clip(0,self.imgshape[0]-1).astype(np.int64), self.cornpoints[:,9].clip(0,self.imgshape[1]-1).astype(np.int64)
# print(self.tid)
Cent_inCartnum = np.count_nonzero(incart[(yc, xc)])
LB_inCartnum = np.count_nonzero(incart[(y1, x1)])
RB_inCartnum = np.count_nonzero(incart[(y2, x2)])
self.Cent_isIncart = False
self.LB_isIncart = False
self.RB_isIncart = False
if Cent_inCartnum: self.Cent_isIncart = True
if LB_inCartnum: self.LB_isIncart = True
if RB_inCartnum: self.RB_isIncart = True
self.posState = self.Cent_isIncart+self.LB_isIncart+self.RB_isIncart
def PCA(self):
self.pca = PCA()
X = self.cornpoints[:, 0:2]
self.pca.fit(X)
def compute_ious_feat(self):
'''Outputs:
self.feature_ious = (incart_iou, outcart_iou, cartboarder_iou, maxbox_iou, minbox_iou)
self.incartrates = incartrates
where:
boxes stream: the trajectory mask formed by all boxes of the track; it can be split into three parts: incart, outcart, cartboarder.
incart_iou, outcart_iou, cartboarder_iou: IoU of each part with the boxes stream.
incart_iou = 0: the track is outside the cart.
outcart_iou = 0: the track is inside the cart (it may also have entered via the bottom-left or bottom-right corner).
maxbox_iou, minbox_iou: IoU of the largest / smallest box of the track with the boxes stream; the smaller their difference (the closer both are to 1), the smaller the track's motion.
incartrates: time series of each box's IoU with incart; increasing over time indicates a put-in action, decreasing indicates a take-out action.
'''
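# Reading incartrates on hypothetical sequences:
# [0.1, 0.4, 0.9] -> the box is increasingly covered by incart, consistent with a put-in action;
# [0.9, 0.5, 0.1] -> coverage drops over time, consistent with a take-out action.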
incart = cv2.imread("./shopcart/cart_tempt/incart.png", cv2.IMREAD_GRAYSCALE)
outcart = cv2.imread("./shopcart/cart_tempt/outcart.png", cv2.IMREAD_GRAYSCALE)
cartboarder = cv2.imread("./shopcart/cart_tempt/cartboarder.png", cv2.IMREAD_GRAYSCALE)
incartrates = []
temp = np.zeros(incart.shape, np.uint8)
maxarea, minarea = 0, self.imgshape[0]*self.imgshape[1]
for i in range(self.frnum):
# x, y, w, h = self.boxes[i, 0:4]
# convert xyxy to center plus full width/height
x = (self.boxes[i, 2] + self.boxes[i, 0]) / 2
w = self.boxes[i, 2] - self.boxes[i, 0]
y = (self.boxes[i, 3] + self.boxes[i, 1]) / 2
h = self.boxes[i, 3] - self.boxes[i, 1]
if w*h > maxarea: maxarea = w*h
if w*h < minarea: minarea = w*h
cv2.rectangle(temp, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), 255, cv2.FILLED)
temp1 = np.zeros(incart.shape, np.uint8)
cv2.rectangle(temp1, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), 255, cv2.FILLED)
temp2 = cv2.bitwise_and(incart, temp1)
inrate = cv2.countNonZero(temp2)/(w*h)
incartrates.append(inrate)
isincart = cv2.bitwise_and(incart, temp)
isoutcart = cv2.bitwise_and(outcart, temp)
iscartboarder = cv2.bitwise_and(cartboarder, temp)
num_temp = cv2.countNonZero(temp)
num_incart = cv2.countNonZero(isincart)
num_outcart = cv2.countNonZero(isoutcart)
num_cartboarder = cv2.countNonZero(iscartboarder)
incart_iou = num_incart/num_temp
outcart_iou = num_outcart/num_temp
cartboarder_iou = num_cartboarder/num_temp
maxbox_iou = maxarea/num_temp
minbox_iou = minarea/num_temp
self.feature_ious = (incart_iou, outcart_iou, cartboarder_iou, maxbox_iou, minbox_iou)
self.incartrates = incartrates
def compute_static_fids(self, thresh1 = 12, thresh2 = 3):
'''
Compute (start_frame_id, end_frame_id) of the relatively static points in the track's trajectory.
thresh1: pixel threshold on the motion of adjacent-frame centers below which the target counts as static.
thresh2: number of consecutive frames required; with thresh2 = 3, at least 4 consecutive points
(3 adjacent differences all below thresh1) are needed to call a segment static.
Interpolation is used, so start/end are not the frame indices of self.boxes.
'''
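# Implementation note: frame ids may have gaps, so the per-step distances (self.trajmin) are first
# resampled onto consecutive frame ids with np.interp; the boolean "distance < thresh1" column is then
# treated as a 1-pixel-wide binary image, and cv2.boundingRect of each cv2.findContours blob yields
# the (start, length) of one static run.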
BoundPixel = 8
x1, y1 = self.cornpoints[:,2], self.cornpoints[:,3],
x2, y2 = self.cornpoints[:,8], self.cornpoints[:,9]
cont1 = sum(abs(x1)<BoundPixel) > 3
# cont2 = sum(abs(y1)<BoundPixel) > 3
cont3 = sum(abs(x2-self.imgshape[0])<BoundPixel) > 3
# cont4 = sum(abs(y2-self.imgshape[1])<BoundPixel) > 3
cont = not(cont1 or cont3)
## ============== next step: use the center point, and choose the corner point with the smallest motion amplitude as the reference point
static_index = []
if self.frnum>=2 and cont:
x1 = self.boxes[1:,7]
x2 = [i for i in range(int(min(x1)), int(max(x1)+1))]
dist_adjc = np.interp(x2, x1, self.trajmin)
# dist_adjc = self.trajmin
static_thresh = (dist_adjc < thresh1)[:, None].astype(np.uint8)
static_cnts, _ = cv2.findContours(static_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for cnt in static_cnts:
_, start, _, num = cv2.boundingRect(cnt)
end = start + num
if num < thresh2:
continue
static_index.append((start, end))
static_index = np.array(static_index)
if static_index.size:
indx = np.argsort(static_index[:, 0])
static_index = static_index[indx]
return static_index
def compute_dynamic_fids(self, thresh1 = 12, thresh2 = 3):
'''
Compute (start_frame_id, end_frame_id) of the moving points in the track's trajectory.
thresh1: pixel threshold on the motion of adjacent-frame centers above which the target counts as moving
thresh2: minimum number of consecutive frames the target must keep moving
Goals:
1. compute the trajectory direction
2. compute the association with hand motion
'''
moving_index = []
if self.frnum>=2:
x1 = self.boxes[1:,7]
x2 = [i for i in range(int(min(x1)), int(max(x1)+1))]
dist_adjc = np.interp(x2, x1, self.trajmin)
moving_thresh = (dist_adjc >= thresh1)[:, None].astype(np.uint8)
moving_cnts, _ = cv2.findContours(moving_thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
for cnt in moving_cnts:
_, start, _, num = cv2.boundingRect(cnt)
if num < thresh2:
continue
end = start + num
moving_index.append((start, end))
# =============================================================================
# '''========= 输出帧id不太合适 ========='''
# moving_fids = []
# for i in range(len(moving_index)):
# i1, i2 = moving_index[i]
# fid1, fid2 = boxes[i1, 7], boxes[i2, 7]
# moving_fids.append([fid1, fid2])
# moving_fids = np.array(moving_fids)
# =============================================================================
moving_index = np.array(moving_index)
if moving_index.size:
indx = np.argsort(moving_index[:, 0])
moving_index = moving_index[indx]
return moving_index
def compute_static_dynamic_fids(self):
static_dynamic_fids = []
for traj in self.trajectory:
# use the parent-class version, which takes the per-step distance array as input
static, dynamic = super().compute_static_fids(traj)
static_dynamic_fids.append((static, dynamic))
return static_dynamic_fids
def is_static(self):
'''Static case 1: the minimum relative key-point motion < 0.2 (the metric tends to run large)
feature = [trajlen_min, trajlen_max,
trajdist_min, trajdist_max,
trajlen_rate, trajdist_rate]
'''
condt1 = self.feature[5] < 0.2 or self.feature[3] < 120
'''Static case 2: the target starts out static; relax the minimum relative key-point motion to < 0.5'''
condt2 = self.static_index.size > 0 \
and self.static_index[0, 0] <= 2 \
and self.feature[5] < 0.5
'''Static case 3: the target is static both at the start and at the end'''
condt3 = self.static_index.shape[0] >= 2 \
and self.static_index[0, 0] <= 2 \
and self.static_index[-1, 1] >= self.frnum-3
condt = condt1 or condt2 or condt3
return condt
# =============================================================================
# track1 = [t for t in tracks if t.feature[5] < 0.2
# or t.feature[3] < 120
# ]
#
# track2 = [t for t in tracks if t.static_index.size > 0
# and t.static_index[0, 0] <= 2
# and t.feature[5] < 0.5]
#
# track3 = [t for t in tracks if t.static_index.shape[0] >= 2
# and t.static_index[0, 0] <= 2
# and t.static_index[-1, 1] >= t.frnum-3]
#
# track12 = self.join_tracks(track1, track2)
#
# '''提取静止状态的 track'''
# static_tracks = self.join_tracks(track12, track3)
# self.Static.extend(static_tracks)
#
# =============================================================================
def is_OutTrack(self):
if self.posState <= 1:
isout = True
else:
isout = False
return isout
def compute_distance(self):
pass
def move_start_fid(self):
pass
def move_end_fid(self):
pass


@@ -0,0 +1,271 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 18:33:01 2024
@author: ym
"""
import numpy as np
from sklearn.cluster import KMeans
from .dotracks import MoveState, Track
class frontTrack(Track):
# boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# 0, 1, 2, 3, 4, 5, 6, 7, 8
def __init__(self, boxes, imgshape=(1024, 1280)):
super().__init__(boxes, imgshape)
self.hands = []
'''trajectory features of the 5 key points (center, top-left, top-right, bottom-left, bottom-right)'''
# self.compute_cornpts_feats()
self.CART_HIGH_THRESH1 = imgshape[1]/2.98
# if self.tid==10:
# print(f"ID: {self.tid}")
'''the static-interval values of y1/y2 are indices along axis=0 of boxes, not frame indices'''
det_y1 = np.diff(boxes[:, 1], axis=0)
det_y2 = np.diff(boxes[:, 3], axis=0)
self.static_y1, self.dynamic_y1 = self.compute_static_fids(det_y1)
self.static_y2, self.dynamic_y2 = self.compute_static_fids(det_y2)
self.isCornpoint = self.is_left_or_right_cornpoint()
self.isBotmpoint = self.is_bottom_cornpoint()
'''depends on self.isCornpoint, so it cannot be initialized in the parent class'''
self.trajfeature()
'''hand-state analysis'''
self.HAND_STATIC_THRESH = 100
self.CART_POSIT_0 = 430
self.CART_POSIT_1 = 620
def is_left_or_right_cornpoint(self):
''' Based on all(boxes):
the bottom-left corner of the boxes coincides with the image's bottom-left corner, or
the bottom-right corner of the boxes coincides with the image's bottom-right corner
'''
x1, y1 = self.boxes[:, 0], self.boxes[:, 1]
x2, y2 = self.boxes[:, 2], self.boxes[:, 3]
# Left-Bottom cornpoint
condt1 = all(x1 < 5) and all(y2 > self.imgshape[1]-5)
# Right-Bottom cornpoint
condt2 = all(x2 > self.imgshape[0]-5) and all(y2 > self.imgshape[1]-5)
condt = condt1 or condt2
return condt
def is_edge_cornpoint(self):
'''Based on all(boxes): whether the boxes overlap the image's left or right edge'''
x1, x2 = self.boxes[:, 0], self.boxes[:, 2]
condt = all(x1 < 3) or all(x2 > self.imgshape[0]-3)
return condt
def is_bottom_cornpoint(self):
'''Based on all(boxes): whether the boxes overlap the image's bottom edge'''
condt = all(self.boxes[:, 3] > self.imgshape[1]-20)
return condt
# def is_OutTrack(self):
# isout = False
# if self.posState <= 1:
# isout = True
# return isout
# =============================================================================
# def compute_static_fids(self, det_y, STATIC_THRESH = 8):
# '''
# 前摄时y一般选择为 box 的 y1 坐标,且需限定商品在购物车内。
# inputs
# y1D array
# parameters
# STATIC_THRESH轨迹处于静止状态的阈值。
# outputs
# 输出为差分值小于 STATIC_THRESH 的y中元素的start, end索引
# ranges = [(x1, y1),
# (x1, y1),
# ...]
# '''
# # print(f"The ID is: {self.tid}")
#
# # det_y = np.diff(y, axis=0)
# ranges, rangex = [], []
#
# static_indices = np.where(np.abs(det_y) < STATIC_THRESH)[0]
#
# if len(static_indices) == 0:
# rangex.append((0, len(det_y)))
# return ranges, rangex
#
# start_index = static_indices[0]
#
# for i in range(1, len(static_indices)):
# if static_indices[i] != static_indices[i-1] + 1:
# ranges.append((start_index, static_indices[i-1] + 1))
# start_index = static_indices[i]
# ranges.append((start_index, static_indices[-1] + 1))
#
# if len(ranges) == 0:
# rangex.append((0, len(det_y)))
# return ranges, rangex
#
# idx1, idx2 = ranges[0][0], ranges[-1][1]
#
# if idx1 != 0:
# rangex.append((0, idx1))
#
# # 轨迹的最后阶段是运动状态
# for k in range(1, len(ranges)):
# index1 = ranges[k-1][1]
# index2 = ranges[k][0]
# rangex.append((index1, index2))
#
# if idx2 != len(det_y):
# rangex.append((idx2, len(det_y)))
#
# return ranges, rangex
#
# =============================================================================
def is_static(self):
assert self.frnum > 1, "the number of boxes must be greater than 1"
# print(f"The ID is: {self.tid}")
# hand and kid targets are not considered
if self.cls == 0 or self.cls == 9:
return False
# all boxes have y2 = 1280 (the bottom edge)
if self.isBotmpoint:
return True
boxes = self.boxes
y0 = (boxes[:, 1]+boxes[:, 3])/2
## net vertical displacement
sum_y0 = y0[-1] - y0[0]
sum_y1 = boxes[-1, 1]-boxes[0, 1]
sum_y2 = boxes[-1, 3]-boxes[0, 3]
# some special cases that need to be considered
isbottom = max(boxes[:, 3]) > 1280-3
istop = min(boxes[:, 1]) < 3
isincart = min(y0) > self.CART_HIGH_THRESH1
uncert = abs(sum_y1)<100 and abs(sum_y2)<100
'''initial condition: the goods center point stays inside the cart'''
condt0 = max((boxes[:, 1]+boxes[:, 3])/2) > self.CART_HIGH_THRESH1
'''Condition 1: net vertical motion of the trajectory (y1 or y2 describes the trajectory length). Possible situations:
(1) the box may coincide with the top or bottom image edge,
(2) the top or bottom edge may jitter
'''
if isbottom and istop:
condt1 = abs(sum_y0) < 300
elif isbottom: # y2 is at the bottom, so use y1 to characterize the motion
condt1 = sum_y1 > -120 and abs(sum_y0)<80 # with a bottom point, upward motion, threshold below 100
elif istop: # y1 is at the top, so use y2 to characterize the motion
condt1 = abs(sum_y2) < 100
else:
condt1 = (abs(sum_y1) < 30 or abs(sum_y2)<30)
'''Condition 2: the trajectory is static both at the start and at the end, judged from the static intervals of y1:
a. the goods are inside the cart,
b. the starting and ending stages of the boxes are both static,
c. the static segments are longer than 3 frames'''
condt2 = False
if len(self.static_y1)>=2:
condt_s0 = self.static_y1[0][0]==0 and self.static_y1[0][1] - self.static_y1[0][0] >= 3
condt_s1 = self.static_y1[-1][1]==self.frnum-1 and self.static_y1[-1][1] - self.static_y1[-1][0] >= 3
condt2 = condt_s0 and condt_s1 and isincart
condt = condt0 and (condt1 or condt2)
return condt
def is_upward(self):
'''Determine whether the goods are being taken out.'''
print(f"The ID is: {self.tid}")
def is_free_move(self):
if self.frnum == 1:
return True
# print(f"The ID is: {self.tid}")
y0 = (self.boxes[:, 1] + self.boxes[:, 3]) / 2
det_y0 = np.diff(y0, axis=0)
sum_y0 = y0[-1] - y0[0]
'''Case 1: the center point moves downward'''
## initial condition: the goods are first detected inside the cart
condt0 = y0[0] > self.CART_HIGH_THRESH1
condt_a = False
## condition 1: the goods start out static (the static criterion should be stricter here)
condt11, condt12 = False, False
if len(self.static_y1)>0:
condt11 = self.static_y1[0][0]==0 and self.static_y1[0][1] - self.static_y1[0][0] >= 5
if len(self.static_y2)>0:
condt12 = self.static_y2[0][0]==0 and self.static_y2[0][1] - self.static_y2[0][0] >= 5
# condition 2: the goods center moves downward
condt2 = y0[-1] > y0[0]
# combined decision a
condt_a = condt0 and (condt11 or condt12) and condt2
'''Case 2: the center point moves upward'''
## the goods center moves upward, but there is no associated hand track and it is not a left/right border point
condt_b = condt0 and len(self.hands)==0 and y0[-1] < y0[0] and (not self.is_edge_cornpoint())
'''Case 3: the goods stay inside the cart but the motion direction is disordered'''
## the center stays inside the cart and the net vertical displacement is smaller than the sum of the two largest absolute frame-to-frame differences, i.e. the motion has no dominant direction
condt_c = False
if self.frnum > 3:
condt_c = all(y0>self.CART_HIGH_THRESH1) and \
(abs(sum_y0) < sum(np.sort(np.abs(det_y0))[::-1][:2])-1)
condt = (condt_a or condt_b or condt_c) and self.cls!=0
return condt

tracking/goodmatch.py Normal file

@@ -0,0 +1,773 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 16 11:51:07 2024
@author: ym
"""
import cv2
import os
import numpy as np
# import time
import pickle
import json
# import matplotlib.pyplot as plt
import pandas as pd
import shutil
import random
import math
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from pathlib import Path
from utils.gen import Profile
from dotrack.dotracks_back import doBackTracks
from dotrack.dotracks_front import doFrontTracks
from utils.drawtracks import plot_frameID_y2, draw_all_trajectories
# from utils.drawtracks import draw5points, drawTrack, drawtracefeat, drawFeatures
# from datetime import datetime
from utils.mergetrack import readDict
def get_img_filename(imgpath = r'./matching/images/' ):
imgexts = ['.png', '.jpg', '.jpeg']
ImgFileList = []
for root, dirs, files in os.walk(imgpath):
ImgList = []
for file in files:
_, ext = os.path.splitext(file)
if ext in imgexts:
ImgFileList.append(os.path.join(root, file))
return ImgFileList
def calculate_similarity_track(similarmode = 'other'):
'''
similarmode:
'mean'
'other'
'''
ContrastDict = np.load('./matching/featdata/imgs_feats_data_refined.pkl', allow_pickle=True)
print(f"The Num of imgsample: {len(ContrastDict)}")
'''================= Build the empty dict MatchObjDict ================='''
def splitkey(key):
videoname = key.split('_')
BarCode = videoname[0]
SampleTime = videoname[1].split('-')[1]
CameraType = videoname[2]
ActionType = videoname[3]
TrackID = '_'.join(key.split('_')[7:])
return BarCode, SampleTime, CameraType, ActionType, TrackID
MatchObjList = []
CameraList = []
for key in ContrastDict.keys():
BarCode, SampleTime, CameraType, ActionType, FeatureID = splitkey(key)
MatchObjList.append('_'.join([BarCode, SampleTime, ActionType]))
CameraList.append(CameraType)
# MatchObjSet = set(MatchObjList)
# CameraSet = set(CameraList)
objects = list(set(MatchObjList))
cameras = list(set(CameraList))
assert len(cameras) == 2, "The number of cameras is wrong!"
MatchObjDict = {}
for obj in objects:
CameraDict = {}
for camera in cameras:
CameraDict[camera] = {}
MatchObjDict[obj] = CameraDict
for key, value in ContrastDict.items():
BarCode, SampleTime, CameraType, ActionType, FeatureID = splitkey(key)
MatchObj = '_'.join([BarCode, SampleTime, ActionType])
vdict = {}
if FeatureID not in MatchObjDict[MatchObj][CameraType]:
vdict[FeatureID] = value['feature']
MatchObjDict[MatchObj][CameraType].update(vdict)
print(f"The Num of MatchObjDict: {len(MatchObjDict)}")
# MatchKeys = [key for key in MatchObjDict.keys()]
num = len(objects)
GtMatrix = np.zeros((num, num), dtype=np.float32)
Similarity = np.zeros((num, num), dtype=np.float32)
InterMatrix = np.zeros((num, num), dtype=np.float32) # inter-class
IntraMatrix = np.zeros((num, num), dtype=np.float32) # intra-class
'''Build the ground-truth matrices: GtMatrix, IntraMatrix, InterMatrix'''
for i, obi in enumerate(objects):
barcode_i = obi.split('_')[0]
for j, obj in enumerate(objects):
barcode_j = obj.split('_')[0]
if barcode_i == barcode_j:
GtMatrix[i, j] = 1
if i!=j: IntraMatrix[i, j] = 1
else:
GtMatrix[i, j] = 0
InterMatrix[i, j] = 1
'''Build the similarity matrix: Similarity'''
ObjFeatList = []
for i, obi in enumerate(objects):
obidict = MatchObjDict[obi]
camlist = []
for camera in obidict.keys():
featlist = []
for fid in obidict[camera].keys():
featlist.append(MatchObjDict[obi][camera][fid])
camlist.append(featlist)
ObjFeatList.append(camlist)
Similarity_1 = Similarity.copy()
for i in range(len(objects)):
obi = ObjFeatList[i]
for j in range(len(objects)):
obj = ObjFeatList[j]
simival = []
for ii in range(len(obi)):
if len(obi[ii])==0: continue
feat_ii = np.asarray(obi[ii])
for jj in range(len(obj)):
if len(obj[jj])==0: continue
feat_jj = np.asarray(obj[jj])
if similarmode == 'mean':
featii = np.mean(feat_ii, axis=0)
featjj = np.mean(feat_jj, axis=0)
try:
matrix = 1- np.maximum(0.0, cdist(featii[None, :], featjj[None, :], 'cosine'))
except Exception as e:
print(f'error is {e.__class__.__name__}')
else:
matrix = 1- np.maximum(0.0, cdist(feat_ii, feat_jj, 'cosine'))
simival.append(np.max(matrix))
if len(simival)==0: continue
Similarity[i, j] = max(simival)
# feat_i = np.empty((0, 256), dtype = np.float32)
# feat_j = np.empty((0, 256), dtype = np.float32)
# for ii in range(len(obi)):
# feat_ii = np.asarray(obi[ii])
# feat_i = np.concatenate((feat_i, feat_ii), axis=0)
# for jj in range(len(obj)):
# feat_jj = np.asarray(obi[jj])
# feat_j = np.concatenate((feat_j, feat_jj), axis=0)
# if similarmode == 'mean':
# feati = np.mean(feat_i, axis=0)
# featj = np.mean(feat_j, axis=0)
# matrix = 1- np.maximum(0.0, cdist(feati[None, :], featj[None, :], 'cosine'))
# else:
# matrix = 1- np.maximum(0.0, cdist(feat_i, feat_j, 'cosine'))
# Similarity_1[i, j] = np.max(matrix)
SimiDict = {'keys': objects, 'GtMatrix': GtMatrix, 'Similarity': Similarity,
'IntraMatrix':IntraMatrix, 'InterMatrix':InterMatrix}
with open(r"./matching/featdata/MatchDict_track.pkl", "wb") as f:
pickle.dump(SimiDict, f)
# SimiDict_1 = {'keys': objects, 'GtMatrix': GtMatrix, 'Similarity':Similarity_1,
# 'IntraMatrix':IntraMatrix, 'InterMatrix':InterMatrix}
# with open(r"./matching/featdata/MatchDict_track_1.pkl", "wb") as f:
# pickle.dump(SimiDict_1, f)
df_GtMatrix = pd.DataFrame(data=GtMatrix, columns = objects, index = objects)
df_GtMatrix.to_csv('./matching/featdata/GtMatrix_track.csv',index=True)
df_similarity = pd.DataFrame(data=Similarity, columns = objects, index = objects)
df_similarity.to_csv('./matching/featdata/Similarity_track.csv',index=True)
# df_similarity_1 = pd.DataFrame(data=Similarity_1, columns = objects, index = objects)
# df_similarity_1.to_csv('./matching/featdata/Similarity_track_1.csv',index=True)
print("Done!!!!")
# SimilarMode = ['mean', 'max']
def calculate_similarity(similarmode = 'mean'):
ContrastDict = np.load('./matching/featdata/imgs_feats_data_noplane.pkl', allow_pickle=True)
print(f"The Num of imgsample: {len(ContrastDict)}")
FrontBackMerged = True
TracKeys = {}
for key, value in ContrastDict.items():
feature = value['feature']
videoname = key.split('_')[:7]
BarCode = videoname[0]
SampleTime = videoname[1].split('-')[1]
CameraType = videoname[2]
ActionType = videoname[3]
TrackID = key.split('_')[7]
if FrontBackMerged:
TracKey = '_'.join([BarCode, SampleTime, ActionType])
else:
TracKey = '_'.join([BarCode, SampleTime, CameraType, ActionType])
if TracKey in TracKeys:
TracKeys[TracKey].append(feature)
else:
TracKeys[TracKey] = []
TracKeys[TracKey].append(feature)
'''===== Build the matrices: Similarity, GtMatrix, IntraMatrix, InterMatrix ====='''
num = len(TracKeys)
keys = [key for key in TracKeys.keys()]
GtMatrix = np.zeros((num, num), dtype=np.float32)
Similarity = np.zeros((num, num), dtype=np.float32)
InterMatrix = np.zeros((num, num), dtype=np.float32) # inter-class
IntraMatrix = np.zeros((num, num), dtype=np.float32) # intra-class
for i, key_i in enumerate(keys):
barcode_i = key_i.split('_')[0]
feat_i = np.asarray(TracKeys[key_i], dtype=np.float32)
for j, key_j in enumerate(keys):
barcode_j = key_j.split('_')[0]
feat_j = np.asarray(TracKeys[key_j], dtype=np.float32)
if similarmode == 'mean':
feati = np.mean(feat_i, axis=0)
featj = np.mean(feat_j, axis=0)
matrix = 1- np.maximum(0.0, cdist(feati[None, :], featj[None, :], 'cosine'))
else:
matrix = 1- np.maximum(0.0, cdist(feat_i, feat_j, 'cosine'))
Similarity[i, j] = np.max(matrix)
if barcode_i == barcode_j:
GtMatrix[i, j] = 1
if i!=j: IntraMatrix[i, j] = 1
else:
GtMatrix[i, j] = 0
InterMatrix[i, j] = 1
# =============================================================================
# '''生成相似度矩阵: Similarity '''
# for i, key_i in enumerate(keys):
# feat_i = np.asarray(TracKeys[key_i], dtype=np.float32)
# for j, key_j in enumerate(keys):
# feat_j = np.asarray(TracKeys[key_j], dtype=np.float32)
#
# if similarmode == 'mean':
# feati = np.mean(feat_i, axis=0)
# featj = np.mean(feat_j, axis=0)
# matrix = 1- np.maximum(0.0, cdist(feati[None, :], featj[None, :], 'cosine'))
# else:
# matrix = 1- np.maximum(0.0, cdist(feat_i, feat_j, 'cosine'))
# Similarity[i, j] = np.max(matrix)
# =============================================================================
MatchDict = {'keys': keys, 'GtMatrix':GtMatrix, 'Similarity':Similarity,
'IntraMatrix':IntraMatrix, 'InterMatrix':InterMatrix}
with open(r"./matching/featdata/MatchDict_noplane.pkl", "wb") as f:
pickle.dump(MatchDict, f)
df_GtMatrix = pd.DataFrame(data=GtMatrix, columns = keys, index = keys)
df_GtMatrix.to_csv('./matching/featdata/GtMatrix_noplane.csv',index=True)
df_similarity = pd.DataFrame(data=Similarity, columns = keys, index = keys)
df_similarity.to_csv('./matching/featdata/Similarity_noplane.csv',index=True)
def sortN_matching(filename = r'./matching/featdata/MatchDict.pkl'):
SimilarDict = np.load(filename, allow_pickle=True)
'''********** the order of keys corresponds one-to-one to the row/column indices of Similarity **********'''
keys = SimilarDict['keys']
Similarity = SimilarDict['Similarity']
'''1. Group the events by Barcode and make sure each Barcode has at least two events'''
BarcodeDict1 = {}
for i, key in enumerate(keys):
barcode = key.split('_')[0]
if barcode not in BarcodeDict1.keys():
BarcodeDict1[barcode] = []
BarcodeDict1[barcode].append(i)
BarcodeDict = {}
BarcodeList = []
for barcode, value in BarcodeDict1.items():
if len(value) < 2: continue
BarcodeDict[barcode] = value
BarcodeList.append(barcode)
BarcodeList = list(set(BarcodeList))
'''Experiment parameters
N: number of Barcodes selected at random
R: number of repetitions; in each repetition two events are randomly drawn per Barcode and assigned to the add / return sets respectively
Thresh: similarity threshold
'''
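# Shape of one repetition: OrderMatrix is N x N with rows = take-out events and columns = add events of
# the selected barcodes; since exactly one event of each kind is drawn per barcode, GTMatrix is the N x N
# identity, and each row's argmax is accepted as a match only when its similarity exceeds th.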
N = 10
if N > len(BarcodeList):
N = math.ceil(len(BarcodeList)/2)
R = 20
Thresh = np.linspace(0.1, 1, 100)
# Thresh = np.linspace(0.601, 0.7, 100)
Recall, Precision = [], []
for th in Thresh:
recall = np.zeros((1, R), dtype=np.float32)
precision = np.zeros((1, R), dtype=np.float32)
for rep in range(R):
BarcodeSelect = random.sample(BarcodeList, N)
AddDict = {}
TakeoutDict = {}
for barcode in BarcodeSelect:
barlist = BarcodeDict[barcode]
if len(barlist) < 2:continue
selected = random.sample(barlist, 2)
AddDict[barcode] = selected[0]
TakeoutDict[barcode] = selected[1]
OrderMatrix = np.zeros((N, N), dtype=np.float32)
GTMatrix = np.zeros((N, N), dtype=np.float32)
MatchMatrix_1 = np.zeros((N, N), dtype=np.float32)
MatchMatrix_2 = np.zeros((N, N), dtype=np.float32)
i = 0
for keyi in BarcodeSelect:
ii = TakeoutDict[keyi]
j = 0
for keyj in BarcodeSelect:
jj = AddDict[keyj]
OrderMatrix[i, j] = Similarity[int(ii), int(jj)]
if keyi == keyj:
GTMatrix[i, j] = 1
j += 1
i += 1
max_indices = np.argmax(OrderMatrix, axis = 1)
for i in range(N):
MatchMatrix_1[i, max_indices[i]] = 1
similar = OrderMatrix[i, max_indices[i]]
if similar > th:
MatchMatrix_2[i, max_indices[i]] = 1
GT_indices = np.where(GTMatrix == 1)
FNTP = MatchMatrix_2[GT_indices]
pred_indices = np.where(MatchMatrix_2 == 1)
TP = np.sum(FNTP==1)
FN = np.sum(FNTP==0)
FPTP = GTMatrix[pred_indices]
FP = np.sum(FPTP == 0)
# assert TP == np.sum(FPTP == 0), "Please Check Errors!!!"
recall[0, rep] = TP/(TP+FN)
precision[0, rep] = TP/(TP+FP+1e-3) # when the threshold is very large, TP and FP may both be 0
Recall.append(recall)
Precision.append(precision)
Recall = np.asarray(Recall).reshape([len(Thresh),-1])
Precision = np.asarray(Precision).reshape([len(Thresh),-1])
reclmean = np.sum(Recall, axis=1) / (np.count_nonzero(Recall, axis=1) + 1e-3)
precmean = np.sum(Precision, axis=1) / (np.count_nonzero(Precision, axis=1) + 1e-3)
print("Done!!!!!")
# th1, recl = [c[0] for c in Recall], [c[1] for c in Recall]
# th2, prep = [c[0] for c in Precision], [c[1] for c in Precision]
recl = [r for r in reclmean]
prep = [p for p in precmean]
'''================= Precision & Recall ================='''
fig, ax = plt.subplots()
ax.plot(Thresh, recl, 'g', label='Recall = TP/(TP+FN)')
ax.plot(Thresh, prep, 'r', label='PrecisePos = TP/(TP+FP)')
# ax.set_xlim([0, 1])
# ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('Precision & Recall')
ax.legend()
plt.show()
def match_evaluate(filename = r'./matching/featdata/MatchDict.pkl'):
SimiDict = np.load(filename, allow_pickle=True)
keys = SimiDict['keys']
GtMatrix = SimiDict['GtMatrix']
Similarity = SimiDict['Similarity']
IntraMatrix = SimiDict['IntraMatrix']
InterMatrix = SimiDict['InterMatrix']
BarcodeList = []
for key in keys:
BarcodeList.append(key.split('_')[0])
BarcodeList = list(set(BarcodeList))
IntraRows, IntraCols = np.nonzero(IntraMatrix)
InterRows, InterCols = np.nonzero(InterMatrix)
IntraN, InterN = len(IntraRows), len(InterRows)
assert IntraN <= InterN, "intra-class pairs outnumber inter-class pairs: the samples are unbalanced"
InterNSelect = IntraN
Thresh = np.linspace(0.1, 1, 100)
# Thresh = np.linspace(0.2, 0.4, 11)
Correct = []
PrecisePos = []
PreciseNeg = []
Recall = []
CorrectMatries = []
for th in Thresh:
MatchMatrix = Similarity > th
CorrectMatrix = MatchMatrix == GtMatrix
CorrectMatries.append(CorrectMatrix)
nn = np.random.permutation(np.arange(InterN))[:InterNSelect]
InterRowsSelect, InterColsSelect = InterRows[nn], InterCols[nn]
IntraCorrMatrix = CorrectMatrix[IntraRows, IntraCols]
InterCorrMatrix = CorrectMatrix[InterRowsSelect, InterColsSelect]
TP = np.sum(IntraCorrMatrix)
TN = np.sum(InterCorrMatrix)
FN = IntraN - TP
FP = InterNSelect - TN
if TP+FP > 0:
PrecisePos.append((th, TP/(TP+FP)))
if TN+FN > 0:
PreciseNeg.append((th, TN/(TN+FN)))
if TP+FN > 0:
Recall.append((th, TP/(TP+FN)))
if TP+TN+FP+FN > 0:
Correct.append((th, (TP+TN)/(TP+TN+FP+FN)))
# print(f'Th: {th}')
# print(f'TP:{TP}, FP:{FP}, TN:{TN}, FN:{FN}')
CorrectMatries = np.asarray(CorrectMatries)
'''====================== Analyze the causes of errors =========================='''
'''
keys can take two forms; their elements come from MatchingDict:
BarCode, SampleTime, ActionType # camera type (front/back) not considered
BarCode, SampleTime, CameraType, ActionType # camera type considered
For display purposes ActionType is abbreviated in the image file names, and matching uses [:3]:
"addGood" --------> "add"
"returnGood" --------> "return"
'''
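# Example key in the 3-field form, built from the sample video list: the video
# "6902007010249_20240411-142528_back_addGood_70f75407b7ae_755_17788571404.mp4" yields the key
# "6902007010249_142528_addGood", whose image files are then looked up using kt1[2][:3] = "add".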
##============= get the image storage location; the corresponding image files can be looked up via keys
imgpath = r'./matching/images/'
ImgFileList = get_img_filename(imgpath)
rowx, colx = np.where(CorrectMatries[66,:,:] == False)
rows, cols = [], []
for i in range(len(rowx)):
ri, ci = rowx[i], colx[i]
if ci > ri:
rows.append(ri)
cols.append(ci)
KeysError = [(keys[rows[i]], keys[cols[i]]) for i in range(len(rows))]
SimiScore = [Similarity[rows[i], cols[i]] for i in range(len(rows))]
for i, keykey in enumerate(KeysError):
key1, key2 = keykey
sscore = SimiScore[i]
kt1, kt2 = key1.split('_'), key2.split('_')
if len(kt1)==3 and len(kt2)==3:
file1 = [f for f in ImgFileList if kt1[0] in f and kt1[1] in f and kt1[2][:3] in f]
file2 = [f for f in ImgFileList if kt2[0] in f and kt2[1] in f and kt2[2][:3] in f]
elif len(kt1)==4 and len(kt2)==4:
file1 = [f for f in ImgFileList if kt1[0] in f and kt1[1] in f and kt1[2] in f and kt1[3][:3] in f]
file2 = [f for f in ImgFileList if kt2[0] in f and kt2[1] in f and kt2[2] in f and kt2[3][:3] in f]
else:
pass
if len(file1)==0 or len(file2)==0:
continue
if kt1[0] == kt2[0]:
gt = "same"
else:
gt = "diff"
path = Path(f'./matching/results/{i}_{gt}_{sscore:.2f}')
if path.exists() and path.is_dir():
shutil.rmtree(path)
path1, path2 = path.joinpath(key1), path.joinpath(key2)
path1.mkdir(parents=True, exist_ok=True)
path2.mkdir(parents=True, exist_ok=True)
for file in file1:
shutil.copy2(file, path1)
for file in file2:
shutil.copy2(file, path2)
if i==99:
break
th1, corr = [c[0] for c in Correct], [c[1] for c in Correct]
th2, recl = [c[0] for c in Recall], [c[1] for c in Recall]
th3, prep = [c[0] for c in PrecisePos], [c[1] for c in PrecisePos]
th4, pren = [c[0] for c in PreciseNeg], [c[1] for c in PreciseNeg]
'''================= Correct ==================='''
fig, ax = plt.subplots()
ax.plot(th1, corr, 'b', label='Correct = (TP+TN)/(TP+TN+FP+FN)')
max_corr = max(corr)
max_index = corr.index(max_corr)
max_thresh = th1[max_index]
ax.plot([0, max_thresh], [max_corr, max_corr], 'r--')
ax.plot([max_thresh, max_thresh], [0, max_corr], 'r--')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('Correct')
ax.legend()
plt.show()
'''================= PrecisePos & PreciseNeg & Recall ================='''
fig, ax = plt.subplots()
ax.plot(th2, recl, 'g', label='Recall = TP/(TP+FN)')
ax.plot(th3, prep, 'c', label='PrecisePos = TP/(TP+FP)')
ax.plot(th4, pren, 'm', label='PreciseNeg = TN/(TN+FN)')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('PrecisePos & PreciseNeg')
ax.legend()
plt.show()
def have_tracked():
featdir = r"./data/trackfeats"
trackdir = r"./data/tracks"
# =============================================================================
# FileList = []
# with open(r'./matching/视频分类/单.txt', 'r') as file:
# lines = file.readlines()
# for line in lines:
# file = line.split('.')[0]
# FileList.append(file)
# FileList = list(set(FileList))
# =============================================================================
MatchingDict = {}
k, gt = 0, Profile()
for filename in os.listdir(featdir):
file, ext = os.path.splitext(filename)
# if file not in FileList: continue
if file.find('20240508')<0: continue
if file.find('17327712807')<0: continue
trackpath = os.path.join(trackdir, file + ".npy")
featpath = os.path.join(featdir, filename)
bboxes = np.load(trackpath)
features_dict = np.load(featpath, allow_pickle=True)
with gt:
if filename.find("front") >= 0:
vts = doFrontTracks(bboxes, features_dict)
vts.classify()
plt = plot_frameID_y2(vts)
savedir = save_dir.joinpath(f'{file}_y2.png')
plt.savefig(savedir)
plt.close()
elif filename.find("back") >= 0:
vts = doBackTracks(bboxes, features_dict)
vts.classify()
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, filename)
print(file+f" need time: {gt.dt:.2f}s")
elements = file.split('_')
assert len(elements) == 7, f"{filename} fields num: {len(elements)}"
BarCode = elements[0]
## ====================================== only used for saving images under the images folder
SampleTime = elements[1].split('-')[1]
CameraType = elements[2]
if elements[3]=="addGood":
ActionType = "add"
elif elements[3]=="returnGood":
ActionType = "return"
else:
ActionType = "x"
subimg_dir = Path(f'./matching/images/{BarCode}_{SampleTime}_{ActionType}/')
if not subimg_dir.exists():
subimg_dir.mkdir(parents=True, exist_ok=True)
# boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# 0, 1, 2, 3, 4, 5, 6, 7, 8
for track in vts.Residual:
boxes = track.boxes
for i in range(boxes.shape[0]):
box = boxes[i, :]
tid, fid, bid = int(box[4]), int(box[7]), int(box[8])
feat_dict = features_dict[fid]
feature = feat_dict[bid]
img = feat_dict[f'{bid}_img']
sub_img_file = subimg_dir.joinpath(f"{BarCode}_{SampleTime}_{CameraType}_{ActionType}_{tid}_{fid}_{bid}.png")
cv2.imwrite(str(sub_img_file), img)
condict = {f"{file}_{tid}_{fid}_{bid}": {'img': img, 'feature': feature}}
MatchingDict.update(condict)
# k += 1
# if k == 100:
# break
featpath = Path('./matching/featdata/')
if not featpath.exists():
featpath.mkdir(parents=True, exist_ok=True)
featdata = featpath.joinpath('imgs_feats_data_noplane.pkl')
with open(featdata, 'wb') as file:
pickle.dump(MatchingDict, file)
def imgsample_cleaning():
ContrastDict = np.load('./matching/featdata/imgs_feats_data.pkl', allow_pickle=True)
print(f"The Num of imgsample: {len(ContrastDict)}")
MatchingDict_refined = {}
for filename, value in ContrastDict.items():
elements = filename.split('_')
tid = elements[7]
fid = elements[8]
bid = elements[9]
BarCode = elements[0]
SampleTime = elements[1].split('-')[1]
CameraType = elements[2]
if elements[3]=="addGood":
ActionType = "add"
elif elements[3]=="returnGood":
ActionType = "return"
else:
ActionType = "x"
refimgdir = f'./matching/images_refined/{BarCode}_{SampleTime}_{ActionType}'
file = '_'.join(elements[0:7])
if os.path.exists(refimgdir) and os.path.isdir(refimgdir):
imgpath = os.path.join(refimgdir, f"{BarCode}_{SampleTime}_{CameraType}_{ActionType}_{tid}_{fid}_{bid}.png")
if os.path.isfile(imgpath):
condict = {f"{file}_{tid}_{fid}_{bid}": value}
MatchingDict_refined.update(condict)
featpath = Path('./matching/featdata/')
if not featpath.exists():
featpath.mkdir(parents=True, exist_ok=True)
featdata = featpath.joinpath('imgs_feats_data_refined.pkl')
with open(featdata, 'wb') as file:
pickle.dump(MatchingDict_refined, file)
print(f"The Num of ContrastDict: {len(ContrastDict)}")
print(f"The Num of MatchingDict_refined: {len(MatchingDict_refined)}")
print(f"The Num of cleaned img: {len(ContrastDict)} - {len(MatchingDict_refined)}")
def main():
'''1. Extract the trajectories of moving goods'''
# have_tracked()
'''2. Remove events that contain more than one product in a single event'''
# imgsample_cleaning()
'''3.1 Compute similarity between events: merge all track features from the front and back cameras'''
# calculate_similarity()
'''3.2 Compute similarity between events: consider the different front/back camera combinations, or the different track combinations'''
# calculate_similarity_track()
'''4.1 Overall performance evaluation of event matching'''
filename = r'./matching/featdata/MatchDict_plane.pkl'
match_evaluate(filename)
filename = r'./matching/featdata/MatchDict_noplane.pkl'
match_evaluate(filename)
'''4.2 Simulate the real scenario: take N items as one add group and evaluate performance when one of them is taken out'''
# filename = r'./matching/featdata/MatchDict_refined.pkl'
# sortN_matching(filename)
if __name__ == "__main__":
save_dir = Path(f'./result/')
main()


@@ -0,0 +1,807 @@
230537101280010007_20240411-144918_back_addGood_70f75407b7ae_570_17788571404.mp4
230537101280010007_20240411-144918_front_addGood_70f75407b7ae_570_17788571404.mp4
230537101280010007_20240411-144945_back_returnGood_70f75407b7ae_565_17788571404.mp4
230537101280010007_20240411-144945_front_returnGood_70f75407b7ae_565_17788571404.mp4
230538001280010009_20240411-144924_back_addGood_70f754088050_550_17327712807.mp4
230538001280010009_20240411-144924_front_addGood_70f754088050_550_17327712807.mp4
230538001280010009_20240411-144934_back_returnGood_70f754088050_550_17327712807.mp4
230538001280010009_20240411-144934_front_returnGood_70f754088050_550_17327712807.mp4
2500456001326_20240411-145321_back_addGood_70f75407b7ae_155_17788571404.mp4
2500456001326_20240411-145321_front_addGood_70f75407b7ae_155_17788571404.mp4
2500456001326_20240411-145327_back_returnGood_70f75407b7ae_155_17788571404.mp4
2500456001326_20240411-145327_front_returnGood_70f75407b7ae_155_17788571404.mp4
2500456001326_20240411-145330_back_addGood_70f754088050_155_17327712807.mp4
2500456001326_20240411-145330_front_addGood_70f754088050_155_17327712807.mp4
2500456001326_20240411-145338_back_returnGood_70f754088050_155_17327712807.mp4
2500456001326_20240411-145338_front_returnGood_70f754088050_155_17327712807.mp4
2500458675341_20240411-144658_back_addGood_70f75407b7ae_140_17788571404.mp4
2500458675341_20240411-144658_front_addGood_70f75407b7ae_140_17788571404.mp4
2500458675341_20240411-144707_back_returnGood_70f75407b7ae_140_17788571404.mp4
2500458675341_20240411-144707_front_returnGood_70f75407b7ae_140_17788571404.mp4
2500458675341_20240411-144711_back_addGood_70f754088050_135_17327712807.mp4
2500458675341_20240411-144711_front_addGood_70f754088050_135_17327712807.mp4
2500458675341_20240411-144718_back_returnGood_70f754088050_135_17327712807.mp4
2500458675341_20240411-144718_front_returnGood_70f754088050_135_17327712807.mp4
2500463464671_20240411-145041_back_addGood_70f75407b7ae_805_17788571404.mp4
2500463464671_20240411-145041_front_addGood_70f75407b7ae_805_17788571404.mp4
2500463464671_20240411-145042_back_addGood_70f754088050_815_17327712807.mp4
2500463464671_20240411-145042_front_addGood_70f754088050_815_17327712807.mp4
2500463464671_20240411-145049_back_returnGood_70f754088050_815_17327712807.mp4
2500463464671_20240411-145049_front_returnGood_70f754088050_815_17327712807.mp4
2500463464671_20240411-145053_back_returnGood_70f75407b7ae_810_17788571404.mp4
2500463464671_20240411-145053_front_returnGood_70f75407b7ae_810_17788571404.mp4
6901070613142_20240411-142722_back_addGood_70f754088050_240_17327712807.mp4
6901070613142_20240411-142722_front_addGood_70f754088050_240_17327712807.mp4
6901070613142_20240411-142725_back_addGood_70f75407b7ae_240_17788571404.mp4
6901070613142_20240411-142725_front_addGood_70f75407b7ae_240_17788571404.mp4
6901070613142_20240411-142730_back_returnGood_70f754088050_240_17327712807.mp4
6901070613142_20240411-142730_front_returnGood_70f754088050_240_17327712807.mp4
6901070613142_20240411-142734_back_returnGood_70f75407b7ae_240_17788571404.mp4
6901070613142_20240411-142734_front_returnGood_70f75407b7ae_240_17788571404.mp4
6901668053893_20240411-143608_back_addGood_70f75407b7ae_70_17788571404.mp4
6901668053893_20240411-143608_back_addGood_70f754088050_70_17327712807.mp4
6901668053893_20240411-143608_front_addGood_70f75407b7ae_70_17788571404.mp4
6901668053893_20240411-143608_front_addGood_70f754088050_70_17327712807.mp4
6901668053893_20240411-143616_back_returnGood_70f754088050_70_17327712807.mp4
6901668053893_20240411-143616_front_returnGood_70f754088050_70_17327712807.mp4
6901668053893_20240411-143617_back_returnGood_70f75407b7ae_70_17788571404.mp4
6901668053893_20240411-143617_front_returnGood_70f75407b7ae_70_17788571404.mp4
6902007010249_20240411-142528_back_addGood_70f75407b7ae_755_17788571404.mp4
6902007010249_20240411-142528_back_addGood_70f754088050_755_17327712807.mp4
6902007010249_20240411-142528_front_addGood_70f75407b7ae_755_17788571404.mp4
6902007010249_20240411-142528_front_addGood_70f754088050_755_17327712807.mp4
6902007010249_20240411-142535_back_returnGood_70f75407b7ae_755_17788571404.mp4
6902007010249_20240411-142535_front_returnGood_70f75407b7ae_755_17788571404.mp4
6902007010249_20240411-142541_back_returnGood_70f754088050_755_17327712807.mp4
6902007010249_20240411-142541_front_returnGood_70f754088050_755_17327712807.mp4
6902022135514_20240411-142819_back_addGood_70f75407b7ae_3180_17788571404.mp4
6902022135514_20240411-142819_front_addGood_70f75407b7ae_3180_17788571404.mp4
6902022135514_20240411-142828_back_addGood_70f754088050_3185_17327712807.mp4
6902022135514_20240411-142828_front_addGood_70f754088050_3185_17327712807.mp4
6902022135514_20240411-142830_back_returnGood_70f75407b7ae_3180_17788571404.mp4
6902022135514_20240411-142830_front_returnGood_70f75407b7ae_3180_17788571404.mp4
6902022135514_20240411-142840_back_returnGood_70f754088050_3185_17327712807.mp4
6902022135514_20240411-142840_front_returnGood_70f754088050_3185_17327712807.mp4
6902265114369_20240411-142331_back_addGood_70f75407b7ae_715_17788571404.mp4
6902265114369_20240411-142331_front_addGood_70f75407b7ae_715_17788571404.mp4
6902265114369_20240411-142338_back_returnGood_70f75407b7ae_720_17788571404.mp4
6902265114369_20240411-142338_front_returnGood_70f75407b7ae_720_17788571404.mp4
6902265114369_20240411-142355_back_addGood_70f754088050_720_17327712807.mp4
6902265114369_20240411-142355_front_addGood_70f754088050_720_17327712807.mp4
6902265114369_20240411-142403_back_returnGood_70f754088050_715_17327712807.mp4
6902265114369_20240411-142403_front_returnGood_70f754088050_715_17327712807.mp4
6902265908012_20240411-142446_back_addGood_70f75407b7ae_1150_17788571404.mp4
6902265908012_20240411-142446_front_addGood_70f75407b7ae_1150_17788571404.mp4
6902265908012_20240411-142447_back_addGood_70f754088050_1150_17327712807.mp4
6902265908012_20240411-142447_front_addGood_70f754088050_1150_17327712807.mp4
6902265908012_20240411-142456_back_returnGood_70f75407b7ae_1150_17788571404.mp4
6902265908012_20240411-142456_front_returnGood_70f75407b7ae_1150_17788571404.mp4
6902265908012_20240411-142459_back_returnGood_70f754088050_1150_17327712807.mp4
6902265908012_20240411-142459_front_returnGood_70f754088050_1150_17327712807.mp4
69025143_20240411-163325_back_addGood_70f75407b7ae_3385_17788571404.mp4
69025143_20240411-163325_front_addGood_70f75407b7ae_3385_17788571404.mp4
69025143_20240411-163352_back_returnGood_70f75407b7ae_3390_17788571404.mp4
69025143_20240411-163352_front_returnGood_70f75407b7ae_3390_17788571404.mp4
69025143_20240411-163417_back_addGood_70f754088050_3380_17327712807.mp4
69025143_20240411-163417_front_addGood_70f754088050_3380_17327712807.mp4
69025143_20240411-163428_back_returnGood_70f754088050_3380_17327712807.mp4
69025143_20240411-163428_front_returnGood_70f754088050_3380_17327712807.mp4
6902538007367_20240411-144030_back_addGood_70f754088050_660_17327712807.mp4
6902538007367_20240411-144030_front_addGood_70f754088050_660_17327712807.mp4
6902538007367_20240411-144031_back_addGood_70f75407b7ae_660_17788571404.mp4
6902538007367_20240411-144031_front_addGood_70f75407b7ae_660_17788571404.mp4
6902538007367_20240411-144037_back_returnGood_70f75407b7ae_660_17788571404.mp4
6902538007367_20240411-144037_front_returnGood_70f75407b7ae_660_17788571404.mp4
6902538007367_20240411-144040_back_returnGood_70f754088050_660_17327712807.mp4
6902538007367_20240411-144040_front_returnGood_70f754088050_660_17327712807.mp4
69028571_20240411-163728_back_addGood_70f75407b7ae_1845_17788571404.mp4
69028571_20240411-163728_front_addGood_70f75407b7ae_1845_17788571404.mp4
69028571_20240411-163739_back_returnGood_70f75407b7ae_1845_17788571404.mp4
69028571_20240411-163739_front_returnGood_70f75407b7ae_1845_17788571404.mp4
69028571_20240411-163853_back_addGood_70f754088050_1840_17327712807.mp4
69028571_20240411-163853_front_addGood_70f754088050_1840_17327712807.mp4
69028571_20240411-163904_back_returnGood_70f754088050_1840_17327712807.mp4
69028571_20240411-163904_front_returnGood_70f754088050_1840_17327712807.mp4
6907992103952_20240411-142013_back_addGood_70f75407b7ae_190_17788571404.mp4
6907992103952_20240411-142013_front_addGood_70f75407b7ae_190_17788571404.mp4
6907992103952_20240411-142021_back_returnGood_70f75407b7ae_190_17788571404.mp4
6907992103952_20240411-142021_front_returnGood_70f75407b7ae_190_17788571404.mp4
6907992103952_20240411-142029_back_addGood_70f754088050_190_17327712807.mp4
6907992103952_20240411-142029_front_addGood_70f754088050_190_17327712807.mp4
6907992103952_20240411-142036_back_returnGood_70f754088050_190_17327712807.mp4
6907992103952_20240411-142036_front_returnGood_70f754088050_190_17327712807.mp4
6907992104157_20240411-141633_back_addGood_70f754088050_1120_17327712807.mp4
6907992104157_20240411-141633_front_addGood_70f754088050_1120_17327712807.mp4
6907992104157_20240411-141637_back_addGood_70f75407b7ae_1120_17788571404.mp4
6907992104157_20240411-141637_front_addGood_70f75407b7ae_1120_17788571404.mp4
6907992104157_20240411-141646_back_returnGood_70f75407b7ae_1120_17788571404.mp4
6907992104157_20240411-141646_back_returnGood_70f754088050_1120_17327712807.mp4
6907992104157_20240411-141646_front_returnGood_70f75407b7ae_1120_17788571404.mp4
6907992104157_20240411-141646_front_returnGood_70f754088050_1120_17327712807.mp4
6907992105765_20240411-142214_back_addGood_70f754088050_1860_17327712807.mp4
6907992105765_20240411-142214_front_addGood_70f754088050_1860_17327712807.mp4
6907992105765_20240411-142220_back_addGood_70f75407b7ae_1855_17788571404.mp4
6907992105765_20240411-142220_front_addGood_70f75407b7ae_1855_17788571404.mp4
6907992105765_20240411-142228_back_returnGood_70f754088050_1860_17327712807.mp4
6907992105765_20240411-142228_front_returnGood_70f754088050_1860_17327712807.mp4
6907992105765_20240411-142230_back_returnGood_70f75407b7ae_1855_17788571404.mp4
6907992105765_20240411-142230_front_returnGood_70f75407b7ae_1855_17788571404.mp4
6907992106113_20240411-142100_back_addGood_70f754088050_3085_17327712807.mp4
6907992106113_20240411-142100_front_addGood_70f754088050_3085_17327712807.mp4
6907992106113_20240411-142133_back_returnGood_70f754088050_3085_17327712807.mp4
6907992106113_20240411-142133_front_returnGood_70f754088050_3085_17327712807.mp4
6907992106113_20240411-142149_back_addGood_70f75407b7ae_3085_17788571404.mp4
6907992106113_20240411-142149_front_addGood_70f75407b7ae_3085_17788571404.mp4
6907992106113_20240411-142157_back_returnGood_70f75407b7ae_3080_17788571404.mp4
6907992106113_20240411-142157_front_returnGood_70f75407b7ae_3080_17788571404.mp4
6907992106205_20240411-141741_back_addGood_70f75407b7ae_795_17788571404.mp4
6907992106205_20240411-141741_front_addGood_70f75407b7ae_795_17788571404.mp4
6907992106205_20240411-141750_back_returnGood_70f75407b7ae_795_17788571404.mp4
6907992106205_20240411-141750_front_returnGood_70f75407b7ae_795_17788571404.mp4
6907992106205_20240411-141806_back_addGood_70f754088050_795_17327712807.mp4
6907992106205_20240411-141806_front_addGood_70f754088050_795_17327712807.mp4
6907992106205_20240411-141815_back_returnGood_70f754088050_795_17327712807.mp4
6907992106205_20240411-141815_front_returnGood_70f754088050_795_17327712807.mp4
6907992106311_20240411-141711_back_addGood_70f754088050_880_17327712807.mp4
6907992106311_20240411-141711_front_addGood_70f754088050_880_17327712807.mp4
6907992106311_20240411-141713_back_addGood_70f75407b7ae_885_17788571404.mp4
6907992106311_20240411-141713_front_addGood_70f75407b7ae_885_17788571404.mp4
6907992106311_20240411-141721_back_returnGood_70f75407b7ae_885_17788571404.mp4
6907992106311_20240411-141721_front_returnGood_70f75407b7ae_885_17788571404.mp4
6907992106311_20240411-141726_back_returnGood_70f754088050_880_17327712807.mp4
6907992106311_20240411-141726_front_returnGood_70f754088050_880_17327712807.mp4
6914973602908_20240411-162105_back_returnGood_70f75407b7ae_5_17788571404.mp4
6914973602908_20240411-162105_front_returnGood_70f75407b7ae_5_17788571404.mp4
6914973602908_20240411-162113_back_returnGood_70f75407b7ae_740_17788571404.mp4
6914973602908_20240411-162113_front_returnGood_70f75407b7ae_740_17788571404.mp4
6914973602908_20240411-162420_back_addGood_70f754088050_740_17327712807.mp4
6914973602908_20240411-162420_front_addGood_70f754088050_740_17327712807.mp4
6914973602908_20240411-162434_back_returnGood_70f754088050_740_17327712807.mp4
6914973602908_20240411-162434_front_returnGood_70f754088050_740_17327712807.mp4
6914973606340_20240411-161927_back_addGood_70f75407b7ae_350_17788571404.mp4
6914973606340_20240411-161927_front_addGood_70f75407b7ae_350_17788571404.mp4
6914973606340_20240411-161936_back_returnGood_70f75407b7ae_355_17788571404.mp4
6914973606340_20240411-161936_front_returnGood_70f75407b7ae_355_17788571404.mp4
6914973606340_20240411-162001_back_addGood_70f754088050_5_17327712807.mp4
6914973606340_20240411-162001_front_addGood_70f754088050_5_17327712807.mp4
6914973606340_20240411-162009_back_returnGood_70f754088050_355_17327712807.mp4
6914973606340_20240411-162009_front_returnGood_70f754088050_355_17327712807.mp4
6920152400630_20240411-144222_back_addGood_70f75407b7ae_580_17788571404.mp4
6920152400630_20240411-144222_front_addGood_70f75407b7ae_580_17788571404.mp4
6920152400630_20240411-144223_back_addGood_70f754088050_570_17327712807.mp4
6920152400630_20240411-144223_front_addGood_70f754088050_570_17327712807.mp4
6920152400630_20240411-144228_back_returnGood_70f75407b7ae_575_17788571404.mp4
6920152400630_20240411-144228_front_returnGood_70f75407b7ae_575_17788571404.mp4
6920152400630_20240411-144236_back_returnGood_70f754088050_570_17327712807.mp4
6920152400630_20240411-144236_front_returnGood_70f754088050_570_17327712807.mp4
6920174757101_20240411-143201_back_addGood_70f75407b7ae_1305_17788571404.mp4
6920174757101_20240411-143201_front_addGood_70f75407b7ae_1305_17788571404.mp4
6920174757101_20240411-143202_back_addGood_70f754088050_1305_17327712807.mp4
6920174757101_20240411-143202_front_addGood_70f754088050_1305_17327712807.mp4
6920174757101_20240411-143208_back_returnGood_70f75407b7ae_1305_17788571404.mp4
6920174757101_20240411-143208_front_returnGood_70f75407b7ae_1305_17788571404.mp4
6920174757101_20240411-143211_back_returnGood_70f754088050_1305_17327712807.mp4
6920174757101_20240411-143211_front_returnGood_70f754088050_1305_17327712807.mp4
6920459905012_20240411-143922_back_addGood_70f75407b7ae_550_17788571404.mp4
6920459905012_20240411-143922_front_addGood_70f75407b7ae_550_17788571404.mp4
6920459905012_20240411-143930_back_returnGood_70f75407b7ae_555_17788571404.mp4
6920459905012_20240411-143930_front_returnGood_70f75407b7ae_555_17788571404.mp4
6920459905012_20240411-143947_back_addGood_70f754088050_550_17327712807.mp4
6920459905012_20240411-143947_front_addGood_70f754088050_550_17327712807.mp4
6920459905012_20240411-143956_back_returnGood_70f754088050_550_17327712807.mp4
6920459905012_20240411-143956_front_returnGood_70f754088050_550_17327712807.mp4
6920907810707_20240411-143338_back_addGood_70f754088050_75_17327712807.mp4
6920907810707_20240411-143338_front_addGood_70f754088050_75_17327712807.mp4
6920907810707_20240411-143339_back_addGood_70f75407b7ae_80_17788571404.mp4
6920907810707_20240411-143339_front_addGood_70f75407b7ae_80_17788571404.mp4
6920907810707_20240411-143347_back_returnGood_70f754088050_80_17327712807.mp4
6920907810707_20240411-143347_front_returnGood_70f754088050_80_17327712807.mp4
6920907810707_20240411-143356_back_returnGood_70f75407b7ae_80_17788571404.mp4
6920907810707_20240411-143356_front_returnGood_70f75407b7ae_80_17788571404.mp4
6922130119213_20240411-142631_back_addGood_70f754088050_1020_17327712807.mp4
6922130119213_20240411-142631_front_addGood_70f754088050_1020_17327712807.mp4
6922130119213_20240411-142636_back_addGood_70f75407b7ae_1020_17788571404.mp4
6922130119213_20240411-142636_front_addGood_70f75407b7ae_1020_17788571404.mp4
6922130119213_20240411-142641_back_returnGood_70f754088050_1020_17327712807.mp4
6922130119213_20240411-142641_front_returnGood_70f754088050_1020_17327712807.mp4
6922130119213_20240411-142647_back_returnGood_70f75407b7ae_1020_17788571404.mp4
6922130119213_20240411-142647_front_returnGood_70f75407b7ae_1020_17788571404.mp4
6922577700968_20240411-141822_back_addGood_70f75407b7ae_1040_17788571404.mp4
6922577700968_20240411-141822_front_addGood_70f75407b7ae_1040_17788571404.mp4
6922577700968_20240411-141834_back_addGood_70f754088050_1045_17327712807.mp4
6922577700968_20240411-141834_front_addGood_70f754088050_1045_17327712807.mp4
6922577700968_20240411-141839_back_returnGood_70f75407b7ae_1045_17788571404.mp4
6922577700968_20240411-141839_front_returnGood_70f75407b7ae_1045_17788571404.mp4
6922577700968_20240411-141844_back_returnGood_70f754088050_1045_17327712807.mp4
6922577700968_20240411-141844_front_returnGood_70f754088050_1045_17327712807.mp4
6922868291168_20240411-142913_back_addGood_70f754088050_1160_17327712807.mp4
6922868291168_20240411-142913_front_addGood_70f754088050_1160_17327712807.mp4
6922868291168_20240411-142921_back_addGood_70f75407b7ae_1150_17788571404.mp4
6922868291168_20240411-142921_front_addGood_70f75407b7ae_1150_17788571404.mp4
6922868291168_20240411-142929_back_returnGood_70f754088050_1160_17327712807.mp4
6922868291168_20240411-142929_front_returnGood_70f754088050_1160_17327712807.mp4
6922868291168_20240411-142933_back_returnGood_70f75407b7ae_1155_17788571404.mp4
6922868291168_20240411-142933_front_returnGood_70f75407b7ae_1155_17788571404.mp4
6923450601549_20240411-162014_back_addGood_70f75407b7ae_600_17788571404.mp4
6923450601549_20240411-162014_front_addGood_70f75407b7ae_600_17788571404.mp4
6923450601549_20240411-162024_back_returnGood_70f75407b7ae_600_17788571404.mp4
6923450601549_20240411-162024_front_returnGood_70f75407b7ae_600_17788571404.mp4
6923450601549_20240411-162216_back_addGood_70f754088050_595_17327712807.mp4
6923450601549_20240411-162216_front_addGood_70f754088050_595_17327712807.mp4
6923450601549_20240411-162227_back_returnGood_70f754088050_595_17327712807.mp4
6923450601549_20240411-162227_front_returnGood_70f754088050_595_17327712807.mp4
6923450603574_20240411-163042_back_addGood_70f75407b7ae_870_17788571404.mp4
6923450603574_20240411-163042_front_addGood_70f75407b7ae_870_17788571404.mp4
6923450603574_20240411-163049_back_returnGood_70f75407b7ae_870_17788571404.mp4
6923450603574_20240411-163049_front_returnGood_70f75407b7ae_870_17788571404.mp4
6923450603574_20240411-163104_back_addGood_70f754088050_865_17327712807.mp4
6923450603574_20240411-163104_front_addGood_70f754088050_865_17327712807.mp4
6923450603574_20240411-163114_back_returnGood_70f754088050_865_17327712807.mp4
6923450603574_20240411-163114_front_returnGood_70f754088050_865_17327712807.mp4
6923450605288_20240411-161704_back_addGood_70f75407b7ae_250_17788571404.mp4
6923450605288_20240411-161704_front_addGood_70f75407b7ae_250_17788571404.mp4
6923450605288_20240411-161715_back_returnGood_70f75407b7ae_250_17788571404.mp4
6923450605288_20240411-161715_front_returnGood_70f75407b7ae_250_17788571404.mp4
6923450605288_20240411-161748_back_addGood_70f754088050_245_17327712807.mp4
6923450605288_20240411-161748_front_addGood_70f754088050_245_17327712807.mp4
6923450605288_20240411-161800_back_returnGood_70f754088050_245_17327712807.mp4
6923450605288_20240411-161800_front_returnGood_70f754088050_245_17327712807.mp4
6923450610428_20240411-161209_back_addGood_70f75407b7ae_275_17788571404.mp4
6923450610428_20240411-161209_front_addGood_70f75407b7ae_275_17788571404.mp4
6923450610428_20240411-161217_back_returnGood_70f75407b7ae_275_17788571404.mp4
6923450610428_20240411-161217_front_returnGood_70f75407b7ae_275_17788571404.mp4
6923450610428_20240411-161235_back_addGood_70f754088050_270_17327712807.mp4
6923450610428_20240411-161235_front_addGood_70f754088050_270_17327712807.mp4
6923450610428_20240411-161249_back_returnGood_70f754088050_275_17327712807.mp4
6923450610428_20240411-161249_front_returnGood_70f754088050_275_17327712807.mp4
6923450610459_20240411-162814_back_addGood_70f75407b7ae_445_17788571404.mp4
6923450610459_20240411-162814_front_addGood_70f75407b7ae_445_17788571404.mp4
6923450610459_20240411-162822_back_returnGood_70f75407b7ae_450_17788571404.mp4
6923450610459_20240411-162822_front_returnGood_70f75407b7ae_450_17788571404.mp4
6923450610459_20240411-162854_back_addGood_70f754088050_445_17327712807.mp4
6923450610459_20240411-162854_front_addGood_70f754088050_445_17327712807.mp4
6923450610459_20240411-162906_back_returnGood_70f754088050_445_17327712807.mp4
6923450610459_20240411-162906_front_returnGood_70f754088050_445_17327712807.mp4
6923450611067_20240411-162639_back_addGood_70f75407b7ae_625_17788571404.mp4
6923450611067_20240411-162639_front_addGood_70f75407b7ae_625_17788571404.mp4
6923450611067_20240411-162648_back_returnGood_70f75407b7ae_625_17788571404.mp4
6923450611067_20240411-162648_front_returnGood_70f75407b7ae_625_17788571404.mp4
6923450611067_20240411-162752_back_addGood_70f754088050_620_17327712807.mp4
6923450611067_20240411-162752_front_addGood_70f754088050_620_17327712807.mp4
6923450611067_20240411-162808_back_returnGood_70f754088050_625_17327712807.mp4
6923450611067_20240411-162808_front_returnGood_70f754088050_625_17327712807.mp4
6923450612415_20240411-160636_back_addGood_70f75407b7ae_870_17788571404.mp4
6923450612415_20240411-160636_front_addGood_70f75407b7ae_870_17788571404.mp4
6923450612415_20240411-160724_back_addGood_70f754088050_865_17327712807.mp4
6923450612415_20240411-160724_front_addGood_70f754088050_865_17327712807.mp4
6923450612415_20240411-160735_back_returnGood_70f754088050_865_17327712807.mp4
6923450612415_20240411-160735_front_returnGood_70f754088050_865_17327712807.mp4
6923450612415_20240411-161013_back_returnGood_70f75407b7ae_865_17788571404.mp4
6923450612415_20240411-161013_front_returnGood_70f75407b7ae_865_17788571404.mp4
6923450612484_20240411-161509_back_addGood_70f75407b7ae_445_17788571404.mp4
6923450612484_20240411-161509_front_addGood_70f75407b7ae_445_17788571404.mp4
6923450612484_20240411-161517_back_returnGood_70f75407b7ae_445_17788571404.mp4
6923450612484_20240411-161517_front_returnGood_70f75407b7ae_445_17788571404.mp4
6923450612484_20240411-161535_back_addGood_70f754088050_450_17327712807.mp4
6923450612484_20240411-161535_front_addGood_70f754088050_450_17327712807.mp4
6923450612484_20240411-161546_back_returnGood_70f754088050_450_17327712807.mp4
6923450612484_20240411-161546_front_returnGood_70f754088050_450_17327712807.mp4
6923450657829_20240411-163522_back_addGood_70f75407b7ae_985_17788571404.mp4
6923450657829_20240411-163522_front_addGood_70f75407b7ae_985_17788571404.mp4
6923450657829_20240411-163532_back_returnGood_70f75407b7ae_985_17788571404.mp4
6923450657829_20240411-163532_front_returnGood_70f75407b7ae_985_17788571404.mp4
6923450657829_20240411-163554_back_addGood_70f754088050_990_17327712807.mp4
6923450657829_20240411-163554_front_addGood_70f754088050_990_17327712807.mp4
6923450657829_20240411-163606_back_returnGood_70f754088050_990_17327712807.mp4
6923450657829_20240411-163606_front_returnGood_70f754088050_990_17327712807.mp4
6923450659441_20240411-163152_back_addGood_70f75407b7ae_1695_17788571404.mp4
6923450659441_20240411-163152_front_addGood_70f75407b7ae_1695_17788571404.mp4
6923450659441_20240411-163204_back_returnGood_70f75407b7ae_1695_17788571404.mp4
6923450659441_20240411-163204_front_returnGood_70f75407b7ae_1695_17788571404.mp4
6923450659441_20240411-163217_back_addGood_70f754088050_1690_17327712807.mp4
6923450659441_20240411-163217_front_addGood_70f754088050_1690_17327712807.mp4
6923450659441_20240411-163231_back_returnGood_70f754088050_1690_17327712807.mp4
6923450659441_20240411-163231_front_returnGood_70f754088050_1690_17327712807.mp4
6923450666838_20240411-162541_back_addGood_70f75407b7ae_285_17788571404.mp4
6923450666838_20240411-162541_front_addGood_70f75407b7ae_285_17788571404.mp4
6923450666838_20240411-162549_back_returnGood_70f75407b7ae_285_17788571404.mp4
6923450666838_20240411-162549_front_returnGood_70f75407b7ae_285_17788571404.mp4
6923450666838_20240411-162637_back_addGood_70f754088050_285_17327712807.mp4
6923450666838_20240411-162637_front_addGood_70f754088050_285_17327712807.mp4
6923450666838_20240411-162704_back_returnGood_70f754088050_285_17327712807.mp4
6923450666838_20240411-162704_front_returnGood_70f754088050_285_17327712807.mp4
6923450668207_20240411-161832_back_addGood_70f75407b7ae_350_17788571404.mp4
6923450668207_20240411-161832_front_addGood_70f75407b7ae_350_17788571404.mp4
6923450668207_20240411-161839_back_returnGood_70f75407b7ae_350_17788571404.mp4
6923450668207_20240411-161839_front_returnGood_70f75407b7ae_350_17788571404.mp4
6923450668207_20240411-161904_back_addGood_70f754088050_350_17327712807.mp4
6923450668207_20240411-161904_front_addGood_70f754088050_350_17327712807.mp4
6923450668207_20240411-161913_back_returnGood_70f754088050_355_17327712807.mp4
6923450668207_20240411-161913_front_returnGood_70f754088050_355_17327712807.mp4
6923450677858_20240411-163609_back_addGood_70f75407b7ae_5_17788571404.mp4
6923450677858_20240411-163609_front_addGood_70f75407b7ae_5_17788571404.mp4
6923450677858_20240411-163618_back_returnGood_70f75407b7ae_1020_17788571404.mp4
6923450677858_20240411-163618_front_returnGood_70f75407b7ae_1020_17788571404.mp4
6923450677858_20240411-163726_back_addGood_70f754088050_1025_17327712807.mp4
6923450677858_20240411-163726_front_addGood_70f754088050_1025_17327712807.mp4
6923450677858_20240411-163737_back_returnGood_70f754088050_1025_17327712807.mp4
6923450677858_20240411-163737_front_returnGood_70f754088050_1025_17327712807.mp4
6923644286293_20240411-141557_back_addGood_70f754088050_795_17327712807.mp4
6923644286293_20240411-141557_front_addGood_70f754088050_795_17327712807.mp4
6923644286293_20240411-141601_back_addGood_70f75407b7ae_800_17788571404.mp4
6923644286293_20240411-141601_front_addGood_70f75407b7ae_800_17788571404.mp4
6923644286293_20240411-141605_back_returnGood_70f754088050_795_17327712807.mp4
6923644286293_20240411-141605_front_returnGood_70f754088050_795_17327712807.mp4
6923644286293_20240411-141609_back_returnGood_70f75407b7ae_800_17788571404.mp4
6923644286293_20240411-141609_front_returnGood_70f75407b7ae_800_17788571404.mp4
6923644298760_20240411-141459_back_addGood_70f75407b7ae_1020_17788571404.mp4
6923644298760_20240411-141459_front_addGood_70f75407b7ae_1020_17788571404.mp4
6923644298760_20240411-141500_back_addGood_70f754088050_1020_17327712807.mp4
6923644298760_20240411-141500_front_addGood_70f754088050_1020_17327712807.mp4
6923644298760_20240411-141511_back_returnGood_70f754088050_1020_17327712807.mp4
6923644298760_20240411-141511_front_returnGood_70f754088050_1020_17327712807.mp4
6923644298760_20240411-141516_back_returnGood_70f75407b7ae_1020_17788571404.mp4
6923644298760_20240411-141516_front_returnGood_70f75407b7ae_1020_17788571404.mp4
6924743915824_20240411-143302_back_addGood_70f75407b7ae_150_17788571404.mp4
6924743915824_20240411-143302_back_addGood_70f754088050_150_17327712807.mp4
6924743915824_20240411-143302_front_addGood_70f75407b7ae_150_17788571404.mp4
6924743915824_20240411-143302_front_addGood_70f754088050_150_17327712807.mp4
6924743915824_20240411-143310_back_returnGood_70f75407b7ae_155_17788571404.mp4
6924743915824_20240411-143310_back_returnGood_70f754088050_150_17327712807.mp4
6924743915824_20240411-143310_front_returnGood_70f75407b7ae_155_17788571404.mp4
6924743915824_20240411-143310_front_returnGood_70f754088050_150_17327712807.mp4
6924882497106_20240411-143858_back_addGood_70f75407b7ae_360_17788571404.mp4
6924882497106_20240411-143858_front_addGood_70f75407b7ae_360_17788571404.mp4
6924882497106_20240411-143904_back_returnGood_70f75407b7ae_360_17788571404.mp4
6924882497106_20240411-143904_front_returnGood_70f75407b7ae_360_17788571404.mp4
6924882497106_20240411-143915_back_addGood_70f754088050_355_17327712807.mp4
6924882497106_20240411-143915_front_addGood_70f754088050_355_17327712807.mp4
6924882497106_20240411-143924_back_returnGood_70f754088050_355_17327712807.mp4
6924882497106_20240411-143924_front_returnGood_70f754088050_355_17327712807.mp4
6925307305525_20240411-141034_back_addGood_70f75407b7ae_1670_17788571404.mp4
6925307305525_20240411-141034_front_addGood_70f75407b7ae_1670_17788571404.mp4
6925307305525_20240411-141044_back_returnGood_70f75407b7ae_1670_17788571404.mp4
6925307305525_20240411-141044_front_returnGood_70f75407b7ae_1670_17788571404.mp4
6925307305525_20240411-141055_back_addGood_70f754088050_1670_17327712807.mp4
6925307305525_20240411-141055_front_addGood_70f754088050_1670_17327712807.mp4
6925307305525_20240411-141106_back_returnGood_70f754088050_1670_17327712807.mp4
6925307305525_20240411-141106_front_returnGood_70f754088050_1670_17327712807.mp4
6928804011173_20240411-143817_back_addGood_70f75407b7ae_555_17788571404.mp4
6928804011173_20240411-143817_front_addGood_70f75407b7ae_555_17788571404.mp4
6928804011173_20240411-143825_back_returnGood_70f75407b7ae_550_17788571404.mp4
6928804011173_20240411-143825_front_returnGood_70f75407b7ae_550_17788571404.mp4
6928804011173_20240411-143833_back_addGood_70f754088050_545_17327712807.mp4
6928804011173_20240411-143833_front_addGood_70f754088050_545_17327712807.mp4
6928804011173_20240411-143841_back_returnGood_70f754088050_550_17327712807.mp4
6928804011173_20240411-143841_front_returnGood_70f754088050_550_17327712807.mp4
6931925828032_20240411-162501_back_addGood_70f75407b7ae_405_17788571404.mp4
6931925828032_20240411-162501_front_addGood_70f75407b7ae_405_17788571404.mp4
6931925828032_20240411-162511_back_returnGood_70f75407b7ae_405_17788571404.mp4
6931925828032_20240411-162511_front_returnGood_70f75407b7ae_405_17788571404.mp4
6931925828032_20240411-162532_back_addGood_70f754088050_405_17327712807.mp4
6931925828032_20240411-162532_front_addGood_70f754088050_405_17327712807.mp4
6931925828032_20240411-162540_back_returnGood_70f754088050_405_17327712807.mp4
6931925828032_20240411-162540_front_returnGood_70f754088050_405_17327712807.mp4
6933620900051_20240411-150018_back_addGood_70f75407b7ae_5_17788571404.mp4
6933620900051_20240411-150018_front_addGood_70f75407b7ae_5_17788571404.mp4
6933620900051_20240411-150035_back_returnGood_70f75407b7ae_380_17788571404.mp4
6933620900051_20240411-150035_front_returnGood_70f75407b7ae_380_17788571404.mp4
6933620900051_20240411-150103_back_addGood_70f754088050_455_17327712807.mp4
6933620900051_20240411-150103_front_addGood_70f754088050_455_17327712807.mp4
6933620900051_20240411-150116_back_returnGood_70f754088050_455_17327712807.mp4
6933620900051_20240411-150116_front_returnGood_70f754088050_455_17327712807.mp4
6934665095108_20240411-141931_back_addGood_70f754088050_360_17327712807.mp4
6934665095108_20240411-141931_front_addGood_70f754088050_360_17327712807.mp4
6934665095108_20240411-141936_back_addGood_70f75407b7ae_360_17788571404.mp4
6934665095108_20240411-141936_front_addGood_70f75407b7ae_360_17788571404.mp4
6934665095108_20240411-141940_back_returnGood_70f754088050_365_17327712807.mp4
6934665095108_20240411-141940_front_returnGood_70f754088050_365_17327712807.mp4
6934665095108_20240411-141945_back_returnGood_70f75407b7ae_360_17788571404.mp4
6934665095108_20240411-141945_front_returnGood_70f75407b7ae_360_17788571404.mp4
6935270642121_20240411-144253_back_addGood_70f75407b7ae_155_17788571404.mp4
6935270642121_20240411-144253_front_addGood_70f75407b7ae_155_17788571404.mp4
6935270642121_20240411-144255_back_addGood_70f754088050_155_17327712807.mp4
6935270642121_20240411-144255_front_addGood_70f754088050_155_17327712807.mp4
6935270642121_20240411-144300_back_returnGood_70f75407b7ae_155_17788571404.mp4
6935270642121_20240411-144300_front_returnGood_70f75407b7ae_155_17788571404.mp4
6935270642121_20240411-144304_back_returnGood_70f754088050_155_17327712807.mp4
6935270642121_20240411-144304_front_returnGood_70f754088050_155_17327712807.mp4
6935284417326_20240411-143504_back_addGood_70f754088050_420_17327712807.mp4
6935284417326_20240411-143504_front_addGood_70f754088050_420_17327712807.mp4
6935284417326_20240411-143508_back_addGood_70f75407b7ae_415_17788571404.mp4
6935284417326_20240411-143508_front_addGood_70f75407b7ae_415_17788571404.mp4
6935284417326_20240411-143517_back_returnGood_70f754088050_420_17327712807.mp4
6935284417326_20240411-143517_front_returnGood_70f754088050_420_17327712807.mp4
6935284417326_20240411-143521_back_returnGood_70f75407b7ae_415_17788571404.mp4
6935284417326_20240411-143521_front_returnGood_70f75407b7ae_415_17788571404.mp4
6941025140798_20240411-135647_back_addGood_70f75407b7ae_1760_17788571404.mp4
6941025140798_20240411-135647_front_addGood_70f75407b7ae_1760_17788571404.mp4
6941025140798_20240411-135655_back_returnGood_70f75407b7ae_1760_17788571404.mp4
6941025140798_20240411-135655_front_returnGood_70f75407b7ae_1760_17788571404.mp4
6941025140798_20240411-140026_back_addGood_70f754088050_1765_17327712807.mp4
6941025140798_20240411-140026_front_addGood_70f754088050_1765_17327712807.mp4
6941025140798_20240411-140105_back_returnGood_70f754088050_1760_17327712807.mp4
6941025140798_20240411-140105_front_returnGood_70f754088050_1760_17327712807.mp4
6952074634794_20240411-143701_back_addGood_70f75407b7ae_275_17788571404.mp4
6952074634794_20240411-143701_front_addGood_70f75407b7ae_275_17788571404.mp4
6952074634794_20240411-143707_back_returnGood_70f75407b7ae_275_17788571404.mp4
6952074634794_20240411-143709_back_returnGood_70f754088050_5_17327712807.mp4
6952074634794_20240411-143709_front_returnGood_70f754088050_5_17327712807.mp4
6952074634794_20240411-143720_back_returnGood_70f754088050_265_17327712807.mp4
6952074634794_20240411-143720_front_returnGood_70f754088050_265_17327712807.mp4
6954432711307_20240411-161612_back_addGood_70f75407b7ae_460_17788571404.mp4
6954432711307_20240411-161612_front_addGood_70f75407b7ae_460_17788571404.mp4
6954432711307_20240411-161622_back_returnGood_70f75407b7ae_460_17788571404.mp4
6954432711307_20240411-161622_front_returnGood_70f75407b7ae_460_17788571404.mp4
6954432711307_20240411-161647_back_addGood_70f754088050_460_17327712807.mp4
6954432711307_20240411-161647_front_addGood_70f754088050_460_17327712807.mp4
6954432711307_20240411-161700_back_returnGood_70f754088050_460_17327712807.mp4
6954432711307_20240411-161700_front_returnGood_70f754088050_460_17327712807.mp4
6959546100993_20240411-135257_back_addGood_70f75407b7ae_295_17788571404.mp4
6959546100993_20240411-135257_front_addGood_70f75407b7ae_295_17788571404.mp4
6959546100993_20240411-135339_back_returnGood_70f75407b7ae_295_17788571404.mp4
6959546100993_20240411-135339_front_returnGood_70f75407b7ae_295_17788571404.mp4
6959546100993_20240411-141255_back_addGood_70f754088050_295_17327712807.mp4
6959546100993_20240411-141255_front_addGood_70f754088050_295_17327712807.mp4
6959546100993_20240411-141309_back_returnGood_70f754088050_295_17327712807.mp4
6959546100993_20240411-141309_front_returnGood_70f754088050_295_17327712807.mp4
6971075127463_20240411-135002_back_addGood_70f75407b7ae_210_17788571404.mp4
6971075127463_20240411-135002_front_addGood_70f75407b7ae_210_17788571404.mp4
6971075127463_20240411-135058_back_returnGood_70f75407b7ae_210_17788571404.mp4
6971075127463_20240411-135058_front_returnGood_70f75407b7ae_210_17788571404.mp4
6971075127463_20240411-141154_back_addGood_70f754088050_215_17327712807.mp4
6971075127463_20240411-141154_front_addGood_70f754088050_215_17327712807.mp4
6971075127463_20240411-141214_back_returnGood_70f754088050_210_17327712807.mp4
6971075127463_20240411-141214_front_returnGood_70f754088050_210_17327712807.mp4
6971328580533_20240411-135748_back_addGood_70f75407b7ae_405_17788571404.mp4
6971328580533_20240411-135748_front_addGood_70f75407b7ae_405_17788571404.mp4
6971328580533_20240411-135757_back_returnGood_70f75407b7ae_405_17788571404.mp4
6971328580533_20240411-135757_front_returnGood_70f75407b7ae_405_17788571404.mp4
6971328580533_20240411-140231_back_addGood_70f754088050_405_17327712807.mp4
6971328580533_20240411-140231_front_addGood_70f754088050_405_17327712807.mp4
6971328580533_20240411-140529_back_addGood_70f754088050_410_17327712807.mp4
6971328580533_20240411-140529_front_addGood_70f754088050_410_17327712807.mp4
6971328580533_20240411-140745_back_returnGood_70f754088050_405_17327712807.mp4
6971328580533_20240411-140745_front_returnGood_70f754088050_405_17327712807.mp4
6971738655333_20240411-144616_back_addGood_70f75407b7ae_270_17788571404.mp4
6971738655333_20240411-144616_back_addGood_70f754088050_260_17327712807.mp4
6971738655333_20240411-144616_front_addGood_70f75407b7ae_270_17788571404.mp4
6971738655333_20240411-144616_front_addGood_70f754088050_260_17327712807.mp4
6971738655333_20240411-144633_back_returnGood_70f75407b7ae_270_17788571404.mp4
6971738655333_20240411-144633_front_returnGood_70f75407b7ae_270_17788571404.mp4
6971738655333_20240411-144635_back_returnGood_70f754088050_260_17327712807.mp4
6971738655333_20240411-144635_front_returnGood_70f754088050_260_17327712807.mp4
6972378998200_20240411-142603_back_addGood_70f754088050_410_17327712807.mp4
6972378998200_20240411-142603_front_addGood_70f754088050_410_17327712807.mp4
6972378998200_20240411-142604_back_addGood_70f75407b7ae_410_17788571404.mp4
6972378998200_20240411-142604_front_addGood_70f75407b7ae_410_17788571404.mp4
6972378998200_20240411-142613_back_returnGood_70f75407b7ae_410_17788571404.mp4
6972378998200_20240411-142613_back_returnGood_70f754088050_410_17327712807.mp4
6972378998200_20240411-142613_front_returnGood_70f75407b7ae_410_17788571404.mp4
6972378998200_20240411-142613_front_returnGood_70f754088050_410_17327712807.mp4
6972790052733_20240411-162926_back_addGood_70f75407b7ae_690_17788571404.mp4
6972790052733_20240411-162926_front_addGood_70f75407b7ae_690_17788571404.mp4
6972790052733_20240411-162933_back_returnGood_70f75407b7ae_690_17788571404.mp4
6972790052733_20240411-162933_front_returnGood_70f75407b7ae_690_17788571404.mp4
6972790052733_20240411-162948_back_addGood_70f754088050_690_17327712807.mp4
6972790052733_20240411-162948_front_addGood_70f754088050_690_17327712807.mp4
6972790052733_20240411-163001_back_returnGood_70f754088050_690_17327712807.mp4
6972790052733_20240411-163001_front_returnGood_70f754088050_690_17327712807.mp4
6974627182033_20240411-150747_back_addGood_70f75407b7ae_485_17788571404.mp4
6974627182033_20240411-150747_front_addGood_70f75407b7ae_485_17788571404.mp4
6974627182033_20240411-150756_back_returnGood_70f75407b7ae_480_17788571404.mp4
6974627182033_20240411-150756_front_returnGood_70f75407b7ae_480_17788571404.mp4
6974627182033_20240411-150825_back_addGood_70f754088050_495_17327712807.mp4
6974627182033_20240411-150825_front_addGood_70f754088050_495_17327712807.mp4
6974627182033_20240411-150835_back_returnGood_70f754088050_495_17327712807.mp4
6974627182033_20240411-150835_front_returnGood_70f754088050_495_17327712807.mp4
6974913231612_20240411-135543_back_addGood_70f75407b7ae_870_17788571404.mp4
6974913231612_20240411-135543_front_addGood_70f75407b7ae_870_17788571404.mp4
6974913231612_20240411-135552_back_returnGood_70f75407b7ae_865_17788571404.mp4
6974913231612_20240411-135552_front_returnGood_70f75407b7ae_865_17788571404.mp4
6974913231612_20240411-140906_back_addGood_70f754088050_755_17327712807.mp4
6974913231612_20240411-140906_front_addGood_70f754088050_755_17327712807.mp4
6974913231612_20240411-140917_back_returnGood_70f754088050_755_17327712807.mp4
6974913231612_20240411-140917_front_returnGood_70f754088050_755_17327712807.mp4
6974995172711_20240411-135456_back_addGood_70f75407b7ae_1455_17788571404.mp4
6974995172711_20240411-135456_front_addGood_70f75407b7ae_1455_17788571404.mp4
6974995172711_20240411-135506_back_returnGood_70f75407b7ae_1460_17788571404.mp4
6974995172711_20240411-135506_front_returnGood_70f75407b7ae_1460_17788571404.mp4
6974995172711_20240411-140831_back_addGood_70f754088050_1460_17327712807.mp4
6974995172711_20240411-140831_front_addGood_70f754088050_1460_17327712807.mp4
6974995172711_20240411-140844_back_returnGood_70f754088050_1460_17327712807.mp4
6974995172711_20240411-140844_front_returnGood_70f754088050_1460_17327712807.mp4
6976090230303_20240411-144457_back_addGood_70f75407b7ae_390_17788571404.mp4
6976090230303_20240411-144457_front_addGood_70f75407b7ae_390_17788571404.mp4
6976090230303_20240411-144458_back_addGood_70f754088050_390_17327712807.mp4
6976090230303_20240411-144458_front_addGood_70f754088050_390_17327712807.mp4
6976090230303_20240411-144508_back_returnGood_70f754088050_390_17327712807.mp4
6976090230303_20240411-144508_front_returnGood_70f754088050_390_17327712807.mp4
6976090230303_20240411-144509_back_returnGood_70f75407b7ae_390_17788571404.mp4
6976090230303_20240411-144509_front_returnGood_70f75407b7ae_390_17788571404.mp4
6976371220276_20240411-150456_back_addGood_70f75407b7ae_390_17788571404.mp4
6976371220276_20240411-150456_front_addGood_70f75407b7ae_390_17788571404.mp4
6976371220276_20240411-150505_back_returnGood_70f75407b7ae_390_17788571404.mp4
6976371220276_20240411-150505_front_returnGood_70f75407b7ae_390_17788571404.mp4
6976371220276_20240411-150559_back_addGood_70f754088050_405_17327712807.mp4
6976371220276_20240411-150559_front_addGood_70f754088050_405_17327712807.mp4
6976371220276_20240411-150610_back_returnGood_70f754088050_405_17327712807.mp4
6976371220276_20240411-150610_front_returnGood_70f754088050_405_17327712807.mp4
230537101280010007_20240412-140824_back_addGood_70f754088050_565_13725988807.mp4
230537101280010007_20240412-140824_front_addGood_70f754088050_565_13725988807.mp4
230537101280010007_20240412-140835_back_returnGood_70f754088050_565_13725988807.mp4
230537101280010007_20240412-140835_front_returnGood_70f754088050_565_13725988807.mp4
2500456001326_20240412-140949_back_addGood_70f754088050_150_13725988807.mp4
2500456001326_20240412-140949_front_addGood_70f754088050_150_13725988807.mp4
2500456001326_20240412-140958_back_returnGood_70f754088050_155_13725988807.mp4
2500456001326_20240412-140958_front_returnGood_70f754088050_155_13725988807.mp4
2500458675341_20240412-140915_back_addGood_70f754088050_130_13725988807.mp4
2500458675341_20240412-140915_front_addGood_70f754088050_130_13725988807.mp4
2500458675341_20240412-140924_back_returnGood_70f754088050_130_13725988807.mp4
2500458675341_20240412-140924_front_returnGood_70f754088050_130_13725988807.mp4
2500463464671_20240412-140718_back_addGood_70f754088050_810_13725988807.mp4
2500463464671_20240412-140718_front_addGood_70f754088050_810_13725988807.mp4
2500463464671_20240412-140731_back_returnGood_70f754088050_810_13725988807.mp4
2500463464671_20240412-140731_front_returnGood_70f754088050_810_13725988807.mp4
6901070613142_20240412-144631_back_addGood_70f754088050_240_13725988807.mp4
6901070613142_20240412-144631_front_addGood_70f754088050_240_13725988807.mp4
6901070613142_20240412-144643_back_returnGood_70f754088050_240_13725988807.mp4
6901070613142_20240412-144643_front_returnGood_70f754088050_240_13725988807.mp4
6901586000993_20240412-152404_back_addGood_70f754088050_890_13725988807.mp4
6901586000993_20240412-152404_front_addGood_70f754088050_890_13725988807.mp4
6901586000993_20240412-152440_back_returnGood_70f754088050_885_13725988807.mp4
6901586000993_20240412-152440_front_returnGood_70f754088050_885_13725988807.mp4
6901668053893_20240412-150115_back_addGood_70f754088050_70_13725988807.mp4
6901668053893_20240412-150115_front_addGood_70f754088050_70_13725988807.mp4
6901668053893_20240412-150125_back_returnGood_70f754088050_70_13725988807.mp4
6901668053893_20240412-150125_front_returnGood_70f754088050_70_13725988807.mp4
6902007010249_20240412-144518_back_addGood_70f754088050_755_13725988807.mp4
6902007010249_20240412-144518_front_addGood_70f754088050_755_13725988807.mp4
6902007010249_20240412-144529_back_returnGood_70f754088050_755_13725988807.mp4
6902007010249_20240412-144529_front_returnGood_70f754088050_755_13725988807.mp4
6902022135514_20240412-144740_back_addGood_70f754088050_3180_13725988807.mp4
6902022135514_20240412-144740_front_addGood_70f754088050_3180_13725988807.mp4
6902022135514_20240412-144750_back_returnGood_70f754088050_3180_13725988807.mp4
6902022135514_20240412-144750_front_returnGood_70f754088050_3180_13725988807.mp4
6902265114369_20240412-144022_back_addGood_70f754088050_720_13725988807.mp4
6902265114369_20240412-144022_front_addGood_70f754088050_720_13725988807.mp4
6902265114369_20240412-144034_back_returnGood_70f754088050_720_13725988807.mp4
6902265114369_20240412-144034_front_returnGood_70f754088050_720_13725988807.mp4
6902265908012_20240412-144141_back_addGood_70f754088050_1145_13725988807.mp4
6902265908012_20240412-144141_front_addGood_70f754088050_1145_13725988807.mp4
6902265908012_20240412-144149_back_returnGood_70f754088050_1145_13725988807.mp4
6902265908012_20240412-144149_front_returnGood_70f754088050_1145_13725988807.mp4
69025143_20240412-135423_back_addGood_70f754088050_3015_13725988807.mp4
69025143_20240412-135423_front_addGood_70f754088050_3015_13725988807.mp4
69025143_20240412-135439_back_returnGood_70f754088050_3015_13725988807.mp4
69025143_20240412-135439_front_returnGood_70f754088050_3015_13725988807.mp4
6907992103952_20240412-141542_back_addGood_70f754088050_190_13725988807.mp4
6907992103952_20240412-141542_front_addGood_70f754088050_190_13725988807.mp4
6907992103952_20240412-141554_back_returnGood_70f754088050_190_13725988807.mp4
6907992103952_20240412-141554_front_returnGood_70f754088050_190_13725988807.mp4
6907992104157_20240412-141255_back_addGood_70f754088050_1120_13725988807.mp4
6907992104157_20240412-141255_front_addGood_70f754088050_1120_13725988807.mp4
6907992104157_20240412-141307_back_returnGood_70f754088050_1120_13725988807.mp4
6907992104157_20240412-141307_front_returnGood_70f754088050_1120_13725988807.mp4
6907992105260_20240412-142144_back_addGood_70f754088050_2775_13725988807.mp4
6907992105260_20240412-142144_front_addGood_70f754088050_2775_13725988807.mp4
6907992105260_20240412-142156_back_returnGood_70f754088050_2775_13725988807.mp4
6907992105260_20240412-142156_front_returnGood_70f754088050_2775_13725988807.mp4
6907992105260_20240412-143214_back_addGood_70f754088050_2770_13725988807.mp4
6907992105260_20240412-143214_front_addGood_70f754088050_2770_13725988807.mp4
6907992105260_20240412-143225_back_returnGood_70f754088050_2775_13725988807.mp4
6907992105260_20240412-143225_front_returnGood_70f754088050_2775_13725988807.mp4
6907992105765_20240412-141505_back_addGood_70f754088050_2085_13725988807.mp4
6907992105765_20240412-141505_front_addGood_70f754088050_2085_13725988807.mp4
6907992105765_20240412-141523_back_returnGood_70f754088050_2085_13725988807.mp4
6907992105765_20240412-141523_front_returnGood_70f754088050_2085_13725988807.mp4
6907992106205_20240412-141729_back_addGood_70f754088050_790_13725988807.mp4
6907992106205_20240412-141729_front_addGood_70f754088050_790_13725988807.mp4
6907992106205_20240412-141741_back_returnGood_70f754088050_790_13725988807.mp4
6907992106205_20240412-141741_front_returnGood_70f754088050_790_13725988807.mp4
6907992106311_20240412-141326_back_addGood_70f754088050_890_13725988807.mp4
6907992106311_20240412-141326_front_addGood_70f754088050_890_13725988807.mp4
6907992106311_20240412-141341_back_returnGood_70f754088050_890_13725988807.mp4
6907992106311_20240412-141341_front_returnGood_70f754088050_890_13725988807.mp4
6914973602908_20240412-134547_back_addGood_70f754088050_505_13725988807.mp4
6914973602908_20240412-134547_front_addGood_70f754088050_505_13725988807.mp4
6914973602908_20240412-134600_back_returnGood_70f754088050_505_13725988807.mp4
6914973602908_20240412-134600_front_returnGood_70f754088050_505_13725988807.mp4
6914973604223_20240412-143609_back_addGood_70f754088050_230_13725988807.mp4
6914973604223_20240412-143609_front_addGood_70f754088050_230_13725988807.mp4
6914973604223_20240412-143619_back_returnGood_70f754088050_230_13725988807.mp4
6914973604223_20240412-143619_front_returnGood_70f754088050_230_13725988807.mp4
6914973606340_20240412-134352_back_addGood_70f754088050_325_13725988807.mp4
6914973606340_20240412-134352_front_addGood_70f754088050_325_13725988807.mp4
6914973606340_20240412-134406_back_returnGood_70f754088050_330_13725988807.mp4
6914973606340_20240412-134406_front_returnGood_70f754088050_330_13725988807.mp4
6919188092377_20240412-141841_back_addGood_70f754088050_515_13725988807.mp4
6919188092377_20240412-141841_front_addGood_70f754088050_515_13725988807.mp4
6919188092377_20240412-141850_back_returnGood_70f754088050_515_13725988807.mp4
6919188092377_20240412-141850_front_returnGood_70f754088050_515_13725988807.mp4
6920152400630_20240412-151036_back_returnGood_70f754088050_580_13725988807.mp4
6920174757101_20240412-144934_back_addGood_70f754088050_1300_13725988807.mp4
6920174757101_20240412-144934_front_addGood_70f754088050_1300_13725988807.mp4
6920174757101_20240412-144946_back_returnGood_70f754088050_1300_13725988807.mp4
6920174757101_20240412-144946_front_returnGood_70f754088050_1300_13725988807.mp4
6920459905012_20240412-150826_back_returnGood_70f754088050_550_13725988807.mp4
6920907810707_20240412-145330_back_addGood_70f754088050_75_13725988807.mp4
6920907810707_20240412-145330_front_addGood_70f754088050_75_13725988807.mp4
6920907810707_20240412-145340_back_returnGood_70f754088050_75_13725988807.mp4
6920907810707_20240412-145340_front_returnGood_70f754088050_75_13725988807.mp4
6922130119213_20240412-144416_back_addGood_70f754088050_1020_13725988807.mp4
6922130119213_20240412-144416_front_addGood_70f754088050_1020_13725988807.mp4
6922130119213_20240412-144426_back_returnGood_70f754088050_1020_13725988807.mp4
6922130119213_20240412-144426_front_returnGood_70f754088050_1020_13725988807.mp4
6922577700968_20240412-141700_back_addGood_70f754088050_1040_13725988807.mp4
6922577700968_20240412-141700_front_addGood_70f754088050_1040_13725988807.mp4
6922577700968_20240412-141710_back_returnGood_70f754088050_1040_13725988807.mp4
6922577700968_20240412-141710_front_returnGood_70f754088050_1040_13725988807.mp4
6922868291168_20240412-145057_back_addGood_70f754088050_1160_13725988807.mp4
6922868291168_20240412-145057_front_addGood_70f754088050_1160_13725988807.mp4
6922868291168_20240412-145113_back_returnGood_70f754088050_1160_13725988807.mp4
6922868291168_20240412-145113_front_returnGood_70f754088050_1160_13725988807.mp4
6923450601549_20240412-134454_back_addGood_70f754088050_640_13725988807.mp4
6923450601549_20240412-134454_front_addGood_70f754088050_640_13725988807.mp4
6923450601549_20240412-134504_back_returnGood_70f754088050_635_13725988807.mp4
6923450601549_20240412-134504_front_returnGood_70f754088050_635_13725988807.mp4
6923450603574_20240412-135226_back_addGood_70f754088050_970_13725988807.mp4
6923450603574_20240412-135226_front_addGood_70f754088050_970_13725988807.mp4
6923450603574_20240412-135240_back_returnGood_70f754088050_965_13725988807.mp4
6923450603574_20240412-135240_front_returnGood_70f754088050_965_13725988807.mp4
6923450605288_20240412-134133_back_addGood_70f754088050_470_13725988807.mp4
6923450605288_20240412-134133_front_addGood_70f754088050_470_13725988807.mp4
6923450605288_20240412-134143_back_returnGood_70f754088050_470_13725988807.mp4
6923450605288_20240412-134143_front_returnGood_70f754088050_470_13725988807.mp4
6923450605332_20240412-135742_back_addGood_70f754088050_1785_13725988807.mp4
6923450605332_20240412-135742_front_addGood_70f754088050_1785_13725988807.mp4
6923450605332_20240412-135756_back_returnGood_70f754088050_1785_13725988807.mp4
6923450605332_20240412-135756_front_returnGood_70f754088050_1785_13725988807.mp4
6923450610428_20240412-133618_back_addGood_70f754088050_320_13725988807.mp4
6923450610428_20240412-133618_front_addGood_70f754088050_320_13725988807.mp4
6923450610428_20240412-133632_back_returnGood_70f754088050_320_13725988807.mp4
6923450610428_20240412-133632_front_returnGood_70f754088050_320_13725988807.mp4
6923450610459_20240412-135040_back_addGood_70f754088050_555_13725988807.mp4
6923450610459_20240412-135040_front_addGood_70f754088050_555_13725988807.mp4
6923450610459_20240412-135057_back_returnGood_70f754088050_555_13725988807.mp4
6923450610459_20240412-135057_front_returnGood_70f754088050_555_13725988807.mp4
6923450611067_20240412-134949_back_addGood_70f754088050_800_13725988807.mp4
6923450611067_20240412-134949_front_addGood_70f754088050_800_13725988807.mp4
6923450611067_20240412-135002_back_returnGood_70f754088050_800_13725988807.mp4
6923450611067_20240412-135002_front_returnGood_70f754088050_800_13725988807.mp4
6923450612415_20240412-133515_back_addGood_70f754088050_820_13725988807.mp4
6923450612415_20240412-133515_front_addGood_70f754088050_820_13725988807.mp4
6923450612415_20240412-133532_back_returnGood_70f754088050_820_13725988807.mp4
6923450612415_20240412-133532_front_returnGood_70f754088050_820_13725988807.mp4
6923450612484_20240412-133746_back_addGood_70f754088050_255_13725988807.mp4
6923450612484_20240412-133746_front_addGood_70f754088050_255_13725988807.mp4
6923450612484_20240412-133758_back_returnGood_70f754088050_255_13725988807.mp4
6923450612484_20240412-133758_front_returnGood_70f754088050_255_13725988807.mp4
6923450657829_20240412-135628_back_addGood_70f754088050_965_13725988807.mp4
6923450657829_20240412-135628_front_addGood_70f754088050_965_13725988807.mp4
6923450657829_20240412-135640_back_returnGood_70f754088050_965_13725988807.mp4
6923450657829_20240412-135640_front_returnGood_70f754088050_965_13725988807.mp4
6923450659441_20240412-135319_back_addGood_70f754088050_1825_13725988807.mp4
6923450659441_20240412-135319_front_addGood_70f754088050_1825_13725988807.mp4
6923450659441_20240412-135334_back_returnGood_70f754088050_1825_13725988807.mp4
6923450659441_20240412-135334_front_returnGood_70f754088050_1825_13725988807.mp4
6923450666838_20240412-134807_back_addGood_70f754088050_760_13725988807.mp4
6923450666838_20240412-134807_front_addGood_70f754088050_760_13725988807.mp4
6923450666838_20240412-134818_back_returnGood_70f754088050_760_13725988807.mp4
6923450666838_20240412-134818_front_returnGood_70f754088050_760_13725988807.mp4
6923450668207_20240412-134250_back_addGood_70f754088050_700_13725988807.mp4
6923450668207_20240412-134250_front_addGood_70f754088050_700_13725988807.mp4
6923450668207_20240412-134302_back_returnGood_70f754088050_700_13725988807.mp4
6923450668207_20240412-134302_front_returnGood_70f754088050_700_13725988807.mp4
6923450677858_20240412-135523_back_addGood_70f754088050_910_13725988807.mp4
6923450677858_20240412-135523_front_addGood_70f754088050_910_13725988807.mp4
6923450677858_20240412-135537_back_returnGood_70f754088050_910_13725988807.mp4
6923450677858_20240412-135537_front_returnGood_70f754088050_910_13725988807.mp4
6923644286293_20240412-141148_back_addGood_70f754088050_810_13725988807.mp4
6923644286293_20240412-141148_front_addGood_70f754088050_810_13725988807.mp4
6923644286293_20240412-141157_back_returnGood_70f754088050_805_13725988807.mp4
6923644286293_20240412-141157_front_returnGood_70f754088050_805_13725988807.mp4
6923644298760_20240412-141219_back_addGood_70f754088050_1020_13725988807.mp4
6923644298760_20240412-141219_front_addGood_70f754088050_1020_13725988807.mp4
6923644298760_20240412-141227_back_returnGood_70f754088050_1020_13725988807.mp4
6923644298760_20240412-141227_front_returnGood_70f754088050_1020_13725988807.mp4
6924743915824_20240412-145245_back_addGood_70f754088050_155_13725988807.mp4
6924743915824_20240412-145245_front_addGood_70f754088050_155_13725988807.mp4
6924743915824_20240412-145254_back_returnGood_70f754088050_155_13725988807.mp4
6924743915824_20240412-145254_front_returnGood_70f754088050_155_13725988807.mp4
6924882497106_20240412-150553_back_addGood_70f754088050_355_13725988807.mp4
6924882497106_20240412-150553_front_addGood_70f754088050_355_13725988807.mp4
6924882497106_20240412-150604_back_returnGood_70f754088050_355_13725988807.mp4
6924882497106_20240412-150604_front_returnGood_70f754088050_355_13725988807.mp4
6925307305525_20240412-152608_back_addGood_70f754088050_590_13725988807.mp4
6925307305525_20240412-152608_front_addGood_70f754088050_590_13725988807.mp4
6925307305525_20240412-152627_back_returnGood_70f754088050_590_13725988807.mp4
6925307305525_20240412-152627_front_returnGood_70f754088050_590_13725988807.mp4
6928033404968_20240412-143452_back_addGood_70f754088050_400_13725988807.mp4
6928033404968_20240412-143452_front_addGood_70f754088050_400_13725988807.mp4
6928033404968_20240412-143502_back_returnGood_70f754088050_400_13725988807.mp4
6928033404968_20240412-143502_front_returnGood_70f754088050_400_13725988807.mp4
6928804011173_20240412-152735_back_addGood_70f754088050_545_13725988807.mp4
6928804011173_20240412-152735_front_addGood_70f754088050_545_13725988807.mp4
6928804011173_20240412-152746_back_returnGood_70f754088050_545_13725988807.mp4
6928804011173_20240412-152746_front_returnGood_70f754088050_545_13725988807.mp4
6931925828032_20240412-134659_back_addGood_70f754088050_415_13725988807.mp4
6931925828032_20240412-134659_front_addGood_70f754088050_415_13725988807.mp4
6931925828032_20240412-134711_back_returnGood_70f754088050_415_13725988807.mp4
6931925828032_20240412-134711_front_returnGood_70f754088050_415_13725988807.mp4
6933620900051_20240412-140306_back_addGood_70f754088050_385_13725988807.mp4
6933620900051_20240412-140306_front_addGood_70f754088050_385_13725988807.mp4
6933620900051_20240412-140326_back_returnGood_70f754088050_390_13725988807.mp4
6933620900051_20240412-140326_front_returnGood_70f754088050_390_13725988807.mp4
6934665095108_20240412-141758_back_addGood_70f754088050_355_13725988807.mp4
6934665095108_20240412-141758_front_addGood_70f754088050_355_13725988807.mp4
6934665095108_20240412-141807_back_returnGood_70f754088050_355_13725988807.mp4
6934665095108_20240412-141807_front_returnGood_70f754088050_355_13725988807.mp4
6935270642121_20240412-151112_back_addGood_70f754088050_165_13725988807.mp4
6935270642121_20240412-151112_front_addGood_70f754088050_165_13725988807.mp4
6935270642121_20240412-151124_back_returnGood_70f754088050_165_13725988807.mp4
6935270642121_20240412-151124_front_returnGood_70f754088050_165_13725988807.mp4
6935284417326_20240412-145558_back_addGood_70f754088050_410_13725988807.mp4
6935284417326_20240412-145558_front_addGood_70f754088050_410_13725988807.mp4
6935284417326_20240412-145610_back_returnGood_70f754088050_410_13725988807.mp4
6935284417326_20240412-145610_front_returnGood_70f754088050_410_13725988807.mp4
6941025140798_20240412-152222_back_addGood_70f754088050_1160_13725988807.mp4
6941025140798_20240412-152222_front_addGood_70f754088050_1160_13725988807.mp4
6941025140798_20240412-152235_back_returnGood_70f754088050_1155_13725988807.mp4
6941025140798_20240412-152235_front_returnGood_70f754088050_1155_13725988807.mp4
6952074634794_20240412-153052_back_addGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153052_front_addGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153105_back_returnGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153105_front_returnGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153233_back_addGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153233_front_addGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153245_back_returnGood_70f754088050_265_13725988807.mp4
6952074634794_20240412-153245_front_returnGood_70f754088050_265_13725988807.mp4
6954432711307_20240412-134028_back_addGood_70f754088050_355_13725988807.mp4
6954432711307_20240412-134028_front_addGood_70f754088050_355_13725988807.mp4
6954432711307_20240412-134040_back_returnGood_70f754088050_350_13725988807.mp4
6954432711307_20240412-134040_front_returnGood_70f754088050_350_13725988807.mp4
6959546100993_20240412-142438_back_addGood_70f754088050_295_13725988807.mp4
6959546100993_20240412-142438_front_addGood_70f754088050_295_13725988807.mp4
6959546100993_20240412-142455_back_returnGood_70f754088050_295_13725988807.mp4
6959546100993_20240412-142455_front_returnGood_70f754088050_295_13725988807.mp4
6971075127463_20240412-142330_back_addGood_70f754088050_210_13725988807.mp4
6971075127463_20240412-142330_front_addGood_70f754088050_210_13725988807.mp4
6971075127463_20240412-142347_back_returnGood_70f754088050_215_13725988807.mp4
6971075127463_20240412-142347_front_returnGood_70f754088050_215_13725988807.mp4
6971075127470_20240412-142650_back_addGood_70f754088050_215_13725988807.mp4
6971075127470_20240412-142650_front_addGood_70f754088050_215_13725988807.mp4
6971075127470_20240412-142700_back_returnGood_70f754088050_215_13725988807.mp4
6971075127470_20240412-142700_front_returnGood_70f754088050_215_13725988807.mp4
6971328580533_20240412-152303_back_addGood_70f754088050_655_13725988807.mp4
6971328580533_20240412-152303_front_addGood_70f754088050_655_13725988807.mp4
6971328580533_20240412-152322_back_returnGood_70f754088050_650_13725988807.mp4
6971328580533_20240412-152322_front_returnGood_70f754088050_650_13725988807.mp4
6971738655333_20240412-141033_back_addGood_70f754088050_270_13725988807.mp4
6971738655333_20240412-141033_front_addGood_70f754088050_270_13725988807.mp4
6971738655333_20240412-141042_back_returnGood_70f754088050_270_13725988807.mp4
6971738655333_20240412-141042_front_returnGood_70f754088050_270_13725988807.mp4
6972378998200_20240412-144314_back_addGood_70f754088050_410_13725988807.mp4
6972378998200_20240412-144314_front_addGood_70f754088050_410_13725988807.mp4
6972378998200_20240412-144326_back_returnGood_70f754088050_410_13725988807.mp4
6972378998200_20240412-144326_front_returnGood_70f754088050_410_13725988807.mp4
6972790052733_20240412-135134_back_addGood_70f754088050_525_13725988807.mp4
6972790052733_20240412-135134_front_addGood_70f754088050_525_13725988807.mp4
6972790052733_20240412-135145_back_returnGood_70f754088050_525_13725988807.mp4
6972790052733_20240412-135145_front_returnGood_70f754088050_525_13725988807.mp4
6974913231612_20240412-142848_back_addGood_70f754088050_500_13725988807.mp4
6974913231612_20240412-142848_front_addGood_70f754088050_500_13725988807.mp4
6974913231612_20240412-142900_back_returnGood_70f754088050_495_13725988807.mp4
6974913231612_20240412-142900_front_returnGood_70f754088050_495_13725988807.mp4
6974995172711_20240412-152143_back_addGood_70f754088050_455_13725988807.mp4
6974995172711_20240412-152143_front_addGood_70f754088050_455_13725988807.mp4
6974995172711_20240412-152158_back_returnGood_70f754088050_455_13725988807.mp4
6974995172711_20240412-152158_front_returnGood_70f754088050_455_13725988807.mp4
6976371220276_20240412-140448_back_addGood_70f754088050_295_13725988807.mp4
6976371220276_20240412-140448_front_addGood_70f754088050_295_13725988807.mp4
6976371220276_20240412-140459_back_returnGood_70f754088050_295_13725988807.mp4
6976371220276_20240412-140459_front_returnGood_70f754088050_295_13725988807.mp4
850009021632_20240412-152522_back_addGood_70f754088050_470_13725988807.mp4
850009021632_20240412-152522_front_addGood_70f754088050_470_13725988807.mp4
850009021632_20240412-152543_back_returnGood_70f754088050_470_13725988807.mp4
850009021632_20240412-152543_front_returnGood_70f754088050_470_13725988807.mp4

View File

@ -0,0 +1,208 @@
6901070613142_20240411-170415_back_addGood_70f75407b7ae_430_17788571404.mp4
6901070613142_20240411-170415_front_addGood_70f75407b7ae_430_17788571404.mp4
6901070613142_20240411-170424_back_returnGood_70f75407b7ae_430_17788571404.mp4
6901070613142_20240411-170424_front_returnGood_70f75407b7ae_430_17788571404.mp4
6901070613142_20240411-170441_back_addGood_70f754088050_430_17327712807.mp4
6901070613142_20240411-170441_front_addGood_70f754088050_430_17327712807.mp4
6901070613142_20240411-170450_back_returnGood_70f754088050_430_17327712807.mp4
6901070613142_20240411-170450_front_returnGood_70f754088050_430_17327712807.mp4
6902538007367_20240411-165931_back_addGood_70f75407b7ae_995_17788571404.mp4
6902538007367_20240411-165931_front_addGood_70f75407b7ae_995_17788571404.mp4
6902538007367_20240411-165942_back_returnGood_70f75407b7ae_995_17788571404.mp4
6902538007367_20240411-165942_front_returnGood_70f75407b7ae_995_17788571404.mp4
6902538007367_20240411-165954_back_addGood_70f754088050_1000_17327712807.mp4
6902538007367_20240411-165954_front_addGood_70f754088050_1000_17327712807.mp4
6902538007367_20240411-170005_back_returnGood_70f754088050_1000_17327712807.mp4
6902538007367_20240411-170005_front_returnGood_70f754088050_1000_17327712807.mp4
6920152400630_20240411-165136_back_addGood_70f75407b7ae_720_17788571404.mp4
6920152400630_20240411-165136_front_addGood_70f75407b7ae_720_17788571404.mp4
6920152400630_20240411-165151_back_returnGood_70f75407b7ae_715_17788571404.mp4
6920152400630_20240411-165151_front_returnGood_70f75407b7ae_715_17788571404.mp4
6920152400630_20240411-165213_back_addGood_70f754088050_720_17327712807.mp4
6920152400630_20240411-165213_front_addGood_70f754088050_720_17327712807.mp4
6920152400630_20240411-165224_back_returnGood_70f754088050_720_17327712807.mp4
6920152400630_20240411-165224_front_returnGood_70f754088050_720_17327712807.mp4
6920907810707_20240411-165615_back_addGood_70f75407b7ae_225_17788571404.mp4
6920907810707_20240411-165615_front_addGood_70f75407b7ae_225_17788571404.mp4
6920907810707_20240411-165625_back_returnGood_70f75407b7ae_225_17788571404.mp4
6920907810707_20240411-165625_front_returnGood_70f75407b7ae_225_17788571404.mp4
6920907810707_20240411-165635_back_addGood_70f754088050_225_17327712807.mp4
6920907810707_20240411-165635_front_addGood_70f754088050_225_17327712807.mp4
6920907810707_20240411-165646_back_returnGood_70f754088050_225_17327712807.mp4
6920907810707_20240411-165646_front_returnGood_70f754088050_225_17327712807.mp4
6923450605288_20240411-172045_back_addGood_70f75407b7ae_750_17788571404.mp4
6923450605288_20240411-172045_front_addGood_70f75407b7ae_750_17788571404.mp4
6923450605288_20240411-172058_back_returnGood_70f75407b7ae_750_17788571404.mp4
6923450605288_20240411-172058_front_returnGood_70f75407b7ae_750_17788571404.mp4
6923450605288_20240411-172134_back_addGood_70f754088050_750_17327712807.mp4
6923450605288_20240411-172134_front_addGood_70f754088050_750_17327712807.mp4
6923450605288_20240411-172147_back_returnGood_70f754088050_750_17327712807.mp4
6923450605288_20240411-172147_front_returnGood_70f754088050_750_17327712807.mp4
6923450610428_20240411-171842_back_addGood_70f75407b7ae_270_17788571404.mp4
6923450610428_20240411-171842_front_addGood_70f75407b7ae_270_17788571404.mp4
6923450610428_20240411-171900_back_returnGood_70f75407b7ae_755_17788571404.mp4
6923450610428_20240411-171900_front_returnGood_70f75407b7ae_755_17788571404.mp4
6923450610428_20240411-171918_back_addGood_70f754088050_755_17327712807.mp4
6923450610428_20240411-171918_front_addGood_70f754088050_755_17327712807.mp4
6923450610428_20240411-172000_back_returnGood_70f754088050_755_17327712807.mp4
6923450610428_20240411-172000_front_returnGood_70f754088050_755_17327712807.mp4
6923450659441_20240411-171417_back_addGood_70f75407b7ae_2340_17788571404.mp4
6923450659441_20240411-171417_front_addGood_70f75407b7ae_2340_17788571404.mp4
6923450659441_20240411-171430_back_returnGood_70f75407b7ae_2335_17788571404.mp4
6923450659441_20240411-171430_front_returnGood_70f75407b7ae_2335_17788571404.mp4
6923450659441_20240411-171445_back_addGood_70f754088050_2340_17327712807.mp4
6923450659441_20240411-171445_front_addGood_70f754088050_2340_17327712807.mp4
6923450659441_20240411-171456_back_returnGood_70f754088050_2340_17327712807.mp4
6923450659441_20240411-171456_front_returnGood_70f754088050_2340_17327712807.mp4
6923450677858_20240411-171658_back_addGood_70f75407b7ae_1310_17788571404.mp4
6923450677858_20240411-171658_front_addGood_70f75407b7ae_1310_17788571404.mp4
6923450677858_20240411-171709_back_returnGood_70f75407b7ae_1310_17788571404.mp4
6923450677858_20240411-171709_front_returnGood_70f75407b7ae_1310_17788571404.mp4
6923450677858_20240411-171720_back_addGood_70f754088050_1310_17327712807.mp4
6923450677858_20240411-171720_front_addGood_70f754088050_1310_17327712807.mp4
6923450677858_20240411-171730_back_returnGood_70f754088050_1315_17327712807.mp4
6923450677858_20240411-171730_front_returnGood_70f754088050_1315_17327712807.mp4
6928804011173_20240411-165756_back_addGood_70f75407b7ae_615_17788571404.mp4
6928804011173_20240411-165756_front_addGood_70f75407b7ae_615_17788571404.mp4
6928804011173_20240411-165808_back_returnGood_70f75407b7ae_620_17788571404.mp4
6928804011173_20240411-165808_front_returnGood_70f75407b7ae_620_17788571404.mp4
6928804011173_20240411-165822_back_addGood_70f754088050_620_17327712807.mp4
6928804011173_20240411-165822_front_addGood_70f754088050_620_17327712807.mp4
6928804011173_20240411-165830_back_returnGood_70f754088050_620_17327712807.mp4
6928804011173_20240411-165830_front_returnGood_70f754088050_620_17327712807.mp4
6976371220276_20240411-170159_back_addGood_70f75407b7ae_745_17788571404.mp4
6976371220276_20240411-170159_front_addGood_70f75407b7ae_745_17788571404.mp4
6976371220276_20240411-170211_back_returnGood_70f75407b7ae_745_17788571404.mp4
6976371220276_20240411-170211_front_returnGood_70f75407b7ae_745_17788571404.mp4
6976371220276_20240411-170230_back_addGood_70f754088050_745_17327712807.mp4
6976371220276_20240411-170230_front_addGood_70f754088050_745_17327712807.mp4
6976371220276_20240411-170240_back_returnGood_70f754088050_745_17327712807.mp4
6976371220276_20240411-170240_front_returnGood_70f754088050_745_17327712807.mp4
230537101280010007_20240412-114205_back_addGood_70f75407b7ae_720_17327712807.mp4
230537101280010007_20240412-114205_front_addGood_70f75407b7ae_720_17327712807.mp4
230537101280010007_20240412-114214_back_returnGood_70f75407b7ae_720_17327712807.mp4
230537101280010007_20240412-114214_front_returnGood_70f75407b7ae_720_17327712807.mp4
2500456001326_20240412-110503_back_addGood_70f75407b7ae_1085_17327712807.mp4
2500456001326_20240412-110503_front_addGood_70f75407b7ae_1085_17327712807.mp4
2500456001326_20240412-110513_back_returnGood_70f75407b7ae_1085_17327712807.mp4
2500456001326_20240412-110513_front_returnGood_70f75407b7ae_1085_17327712807.mp4
6901070613142_20240412-110200_back_addGood_70f754088050_1180_17788571404.mp4
6901070613142_20240412-110200_front_addGood_70f754088050_1180_17788571404.mp4
6901070613142_20240412-110207_back_returnGood_70f754088050_1180_17788571404.mp4
6901070613142_20240412-110207_front_returnGood_70f754088050_1180_17788571404.mp4
6901070613142_20240412-112959_back_addGood_70f75407b7ae_655_17327712807.mp4
6901070613142_20240412-112959_front_addGood_70f75407b7ae_655_17327712807.mp4
6901070613142_20240412-113011_back_returnGood_70f75407b7ae_655_17327712807.mp4
6901070613142_20240412-113011_front_returnGood_70f75407b7ae_655_17327712807.mp4
6901668053893_20240412-113635_back_addGood_70f75407b7ae_565_17327712807.mp4
6901668053893_20240412-113635_front_addGood_70f75407b7ae_565_17327712807.mp4
6901668053893_20240412-113645_back_returnGood_70f75407b7ae_640_17327712807.mp4
6901668053893_20240412-113645_front_returnGood_70f75407b7ae_640_17327712807.mp4
6902007010249_20240412-113050_back_addGood_70f75407b7ae_1460_17327712807.mp4
6902007010249_20240412-113050_front_addGood_70f75407b7ae_1460_17327712807.mp4
6902007010249_20240412-113103_back_returnGood_70f75407b7ae_1460_17327712807.mp4
6902007010249_20240412-113103_front_returnGood_70f75407b7ae_1460_17327712807.mp4
6902022135514_20240412-112914_back_addGood_70f75407b7ae_3640_17327712807.mp4
6902022135514_20240412-112914_front_addGood_70f75407b7ae_3640_17327712807.mp4
6902022135514_20240412-112926_back_returnGood_70f75407b7ae_3640_17327712807.mp4
6902022135514_20240412-112926_front_returnGood_70f75407b7ae_3640_17327712807.mp4
6902265114369_20240412-110609_back_addGood_70f75407b7ae_990_17327712807.mp4
6902265114369_20240412-110609_front_addGood_70f75407b7ae_990_17327712807.mp4
6902265114369_20240412-110618_back_returnGood_70f75407b7ae_990_17327712807.mp4
6902265114369_20240412-110618_front_returnGood_70f75407b7ae_990_17327712807.mp4
6902265908012_20240412-111958_back_addGood_70f75407b7ae_1520_17327712807.mp4
6902265908012_20240412-111958_front_addGood_70f75407b7ae_1520_17327712807.mp4
6902265908012_20240412-112012_back_returnGood_70f75407b7ae_1520_17327712807.mp4
6902265908012_20240412-112012_front_returnGood_70f75407b7ae_1520_17327712807.mp4
6902538007367_20240412-105046_back_addGood_70f754088050_930_17788571404.mp4
6902538007367_20240412-105046_front_addGood_70f754088050_930_17788571404.mp4
6902538007367_20240412-105057_back_returnGood_70f754088050_930_17788571404.mp4
6902538007367_20240412-105057_front_returnGood_70f754088050_930_17788571404.mp4
6902538007367_20240412-105130_back_addGood_70f754088050_930_17788571404.mp4
6902538007367_20240412-105130_front_addGood_70f754088050_930_17788571404.mp4
6902538007367_20240412-105143_back_returnGood_70f754088050_935_17788571404.mp4
6902538007367_20240412-105143_front_returnGood_70f754088050_935_17788571404.mp4
6907992103952_20240412-114032_back_addGood_70f75407b7ae_545_17327712807.mp4
6907992103952_20240412-114032_front_addGood_70f75407b7ae_545_17327712807.mp4
6907992103952_20240412-114043_back_returnGood_70f75407b7ae_540_17327712807.mp4
6907992103952_20240412-114043_front_returnGood_70f75407b7ae_540_17327712807.mp4
6907992106311_20240412-110328_back_addGood_70f75407b7ae_2565_17327712807.mp4
6907992106311_20240412-110328_front_addGood_70f75407b7ae_2565_17327712807.mp4
6907992106311_20240412-110342_back_returnGood_70f75407b7ae_2570_17327712807.mp4
6907992106311_20240412-110342_front_returnGood_70f75407b7ae_2570_17327712807.mp4
6920152400630_20240412-112256_back_addGood_70f75407b7ae_1230_17327712807.mp4
6920152400630_20240412-112256_front_addGood_70f75407b7ae_1230_17327712807.mp4
6920152400630_20240412-112308_back_returnGood_70f75407b7ae_1230_17327712807.mp4
6920152400630_20240412-112308_front_returnGood_70f75407b7ae_1230_17327712807.mp4
6920174757101_20240412-112806_back_addGood_70f75407b7ae_2120_17327712807.mp4
6920174757101_20240412-112806_front_addGood_70f75407b7ae_2120_17327712807.mp4
6920174757101_20240412-112823_back_returnGood_70f75407b7ae_2125_17327712807.mp4
6920174757101_20240412-112823_front_returnGood_70f75407b7ae_2125_17327712807.mp4
6920459905012_20240412-104811_back_addGood_70f754088050_840_17788571404.mp4
6920459905012_20240412-104811_front_addGood_70f754088050_840_17788571404.mp4
6920459905012_20240412-104906_back_returnGood_70f754088050_840_17788571404.mp4
6920459905012_20240412-104906_front_returnGood_70f754088050_840_17788571404.mp4
6920459905012_20240412-105345_back_addGood_70f754088050_830_17788571404.mp4
6920459905012_20240412-105345_front_addGood_70f754088050_830_17788571404.mp4
6920459905012_20240412-105356_back_returnGood_70f754088050_830_17788571404.mp4
6920459905012_20240412-105356_front_returnGood_70f754088050_830_17788571404.mp4
6920459905012_20240412-113755_back_addGood_70f75407b7ae_970_17327712807.mp4
6920459905012_20240412-113755_front_addGood_70f75407b7ae_970_17327712807.mp4
6920459905012_20240412-113808_back_returnGood_70f75407b7ae_970_17327712807.mp4
6920459905012_20240412-113808_front_returnGood_70f75407b7ae_970_17327712807.mp4
6920907810707_20240412-105728_back_addGood_70f754088050_150_17788571404.mp4
6920907810707_20240412-105728_front_addGood_70f754088050_150_17788571404.mp4
6920907810707_20240412-105739_back_returnGood_70f754088050_145_17788571404.mp4
6920907810707_20240412-105739_front_returnGood_70f754088050_145_17788571404.mp4
6920907810707_20240412-113710_back_addGood_70f75407b7ae_900_17327712807.mp4
6920907810707_20240412-113710_front_addGood_70f75407b7ae_900_17327712807.mp4
6920907810707_20240412-113720_back_returnGood_70f75407b7ae_900_17327712807.mp4
6920907810707_20240412-113720_front_returnGood_70f75407b7ae_900_17327712807.mp4
6922130119213_20240412-111854_back_addGood_70f75407b7ae_1310_17327712807.mp4
6922130119213_20240412-111854_front_addGood_70f75407b7ae_1310_17327712807.mp4
6922130119213_20240412-111904_back_returnGood_70f75407b7ae_1310_17327712807.mp4
6922130119213_20240412-111904_front_returnGood_70f75407b7ae_1310_17327712807.mp4
6922577700968_20240412-114323_back_addGood_70f75407b7ae_1045_17327712807.mp4
6922577700968_20240412-114323_front_addGood_70f75407b7ae_1045_17327712807.mp4
6922577700968_20240412-114333_back_returnGood_70f75407b7ae_1310_17327712807.mp4
6922577700968_20240412-114333_front_returnGood_70f75407b7ae_1310_17327712807.mp4
6922868291168_20240412-112724_back_addGood_70f75407b7ae_4540_17327712807.mp4
6922868291168_20240412-112724_front_addGood_70f75407b7ae_4540_17327712807.mp4
6922868291168_20240412-112736_back_returnGood_70f75407b7ae_4540_17327712807.mp4
6922868291168_20240412-112736_front_returnGood_70f75407b7ae_4540_17327712807.mp4
6923644286293_20240412-114002_back_addGood_70f75407b7ae_1535_17327712807.mp4
6923644286293_20240412-114002_front_addGood_70f75407b7ae_1535_17327712807.mp4
6923644286293_20240412-114012_back_returnGood_70f75407b7ae_1535_17327712807.mp4
6923644286293_20240412-114012_front_returnGood_70f75407b7ae_1535_17327712807.mp4
6924743915824_20240412-104437_back_addGood_70f754088050_455_17788571404.mp4
6924743915824_20240412-104437_front_addGood_70f754088050_455_17788571404.mp4
6924743915824_20240412-104448_back_returnGood_70f754088050_455_17788571404.mp4
6924743915824_20240412-104448_front_returnGood_70f754088050_455_17788571404.mp4
6924743915824_20240412-113553_back_addGood_70f75407b7ae_920_17327712807.mp4
6924743915824_20240412-113553_front_addGood_70f75407b7ae_920_17327712807.mp4
6924743915824_20240412-113603_back_returnGood_70f75407b7ae_920_17327712807.mp4
6924743915824_20240412-113603_front_returnGood_70f75407b7ae_920_17327712807.mp4
6928804011173_20240412-105808_back_addGood_70f754088050_915_17788571404.mp4
6928804011173_20240412-105808_front_addGood_70f754088050_915_17788571404.mp4
6928804011173_20240412-105818_back_returnGood_70f754088050_910_17788571404.mp4
6928804011173_20240412-105818_front_returnGood_70f754088050_910_17788571404.mp4
6934665095108_20240412-114106_back_addGood_70f75407b7ae_885_17327712807.mp4
6934665095108_20240412-114106_front_addGood_70f75407b7ae_885_17327712807.mp4
6934665095108_20240412-114116_back_returnGood_70f75407b7ae_885_17327712807.mp4
6934665095108_20240412-114116_front_returnGood_70f75407b7ae_885_17327712807.mp4
6935270642121_20240412-111602_back_addGood_70f75407b7ae_540_17327712807.mp4
6935270642121_20240412-111602_front_addGood_70f75407b7ae_540_17327712807.mp4
6935270642121_20240412-111614_back_returnGood_70f75407b7ae_540_17327712807.mp4
6935270642121_20240412-111614_front_returnGood_70f75407b7ae_540_17327712807.mp4
6952074634794_20240412-104633_back_addGood_70f754088050_855_17788571404.mp4
6952074634794_20240412-104633_front_addGood_70f754088050_855_17788571404.mp4
6952074634794_20240412-104700_back_returnGood_70f754088050_855_17788571404.mp4
6952074634794_20240412-104700_front_returnGood_70f754088050_855_17788571404.mp4
6952074634794_20240412-113515_back_addGood_70f75407b7ae_595_17327712807.mp4
6952074634794_20240412-113515_front_addGood_70f75407b7ae_595_17327712807.mp4
6952074634794_20240412-113524_back_returnGood_70f75407b7ae_595_17327712807.mp4
6952074634794_20240412-113524_front_returnGood_70f75407b7ae_595_17327712807.mp4
6972378998200_20240412-111721_back_addGood_70f75407b7ae_870_17327712807.mp4
6972378998200_20240412-111721_front_addGood_70f75407b7ae_870_17327712807.mp4
6972378998200_20240412-111741_back_returnGood_70f75407b7ae_865_17327712807.mp4
6972378998200_20240412-111741_front_returnGood_70f75407b7ae_865_17327712807.mp4

View File

@ -0,0 +1,12 @@
Field naming rules for collected video files:
230537101280010007_20240411-144945_back_returnGood_70f75407b7ae_565_17788571404.mp4
String targetName =
    barCode + "_"                      (barcode field)
    + recordFileName + "_"             (timestamp field, accurate to the second)
    + "back/front" + "_"               (rear/front camera field)
    + "addGood/returnGood" + "_"       (add-purchase/return field)
    + macId + "_"                      (MAC address field, with the colons removed)
    + Math.abs(goodsWeight) + "_"      (goods weight field: absolute value of the change)
    + user.phone                       (collector's phone number field)
+ ".mp4";

View File

@ -0,0 +1,6 @@
6920152400630_20240412-151024_back_addGood_70f754088050_580_13725988807.mp4
6920152400630_20240412-151024_front_addGood_70f754088050_580_13725988807.mp4
6920152400630_20240412-151036_front_returnGood_70f754088050_580_13725988807.mp4
6920459905012_20240412-150815_back_addGood_70f754088050_550_13725988807.mp4
6920459905012_20240412-150815_front_addGood_70f754088050_550_13725988807.mp4
6920459905012_20240412-150826_front_returnGood_70f754088050_550_13725988807.mp4

View File

@ -0,0 +1,151 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 19 18:17:55 2023
@author: ym
"""
import cv2
import os
import numpy as np
def tempt_add_adjc():
temp = cv2.imread("img.png")
path = r"D:\DeepLearning\yolov5\runs\trajectory"
patr = r"D:\DeepLearning\yolov5\tracking\result"
for filename in os.listdir(path):
imgpath = os.path.join(path, filename)
img = cv2.imread(imgpath)
img1 = cv2.add(img, temp)
img1path = os.path.join(patr, filename)
cv2.imwrite(img1path, img1)
def temp_add_boarder():
temp = cv2.imread("cartedge.png")
temp[640:, 0:20, :] = 255
temp[640:, -20:, :] = 255
temp[-20:, :, :] = 255
cv2.imwrite("cartboarder.png", temp)
def create_front_temp():
image = cv2.imread("image_front.png")
Height, Width = image.shape[:2]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY_INV)
board = cv2.bitwise_not(binary)
contours, _ = cv2.findContours(board, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
k = 0
for cnt in contours:
img = np.zeros((Height, Width), dtype=np.uint8)
cv2.drawContours(img, [cnt], -1, 255, 3)
k += 1
cv2.imwrite(f"fronttemp_{k}.png", img)
imgshow = cv2.drawContours(image, contours, -1, (0,255,0), 3)
cv2.imwrite("board_ftmp_line.png", imgshow)
# cv2.imwrite("4.png", board)
# cv2.imwrite("1.png", gray)
# cv2.imwrite("2.png", binary)
def create_back_temp():
'''
    image1.png: the initial image from which the contours are extracted
    image2.png: mainly used for visualizing the result
    Return: img.png
'''
image = cv2.imread("image1.png")
Height, Width = image.shape[:2]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray[:405, :] = 0
thresh, binary = cv2.threshold(gray, 254, 255, cv2.THRESH_BINARY)
cv2.imwrite("shopcart.png", binary)
imgshow = cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)
contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
imgshow = cv2.drawContours(imgshow, contours, -1, (0,255,0), 1)
cv2.imwrite("imgshow.png", imgshow)
image2 = cv2.imread("image2.png")
image2 = cv2.drawContours(image2, contours, -1, (0,255,0), 3)
for cnt in contours:
_, start, _, num = cv2.boundingRect(cnt)
x1 = (cnt[:, 0, 0] != 0)
x2 = (cnt[:, 0, 0] != Width-1)
x3 = (cnt[:, 0, 1] != Height-1)
x = (x1 & x2) & x3
idx = np.where(x)
cntx = cnt[idx, :, :][0]
cnt1 = cntx[:,0,:].copy()
cntx[:, 0, 1] -= 60
cnt2 = cntx[:,0,:].copy()
cv2.drawContours(image2,[cntx], 0, (0,0,255), 2)
img = np.zeros(gray.shape, np.uint8)
for i in range(len(cnt1)):
x1, y1 = cnt1[i]
x2, y2 = cnt2[i]
cv2.rectangle(img, (x1-1, y1-1), (x1+1, y1+1), 255, 1)
cv2.rectangle(img, (x2-1, y2-1), (x2+1, y2+1), 255, 1)
cv2.imwrite("img.png", img)
if __name__ == "__main__":
# create_back_temp()
# temp_add_boarder()
# tempt_add_adjc()
create_front_temp()

View File

@ -0,0 +1 @@
Extract the shopping-cart contour, and determine whether an arbitrary point lies inside the cart or near the cart border.
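A minimal sketch of that point-in-cart test, assuming a binary cart mask such as cart_tempt/back_incart.png (white = inside the cart); it is an illustration, not this module's API:
```python
import cv2

mask = cv2.imread("cart_tempt/back_incart.png", cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(mask, 250, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cart = max(contours, key=cv2.contourArea)        # largest contour = cart region

point = (640, 900)                               # (x, y) pixel to be tested
dist = cv2.pointPolygonTest(cart, point, True)   # signed distance to the contour
inside = dist > 0
near_border = abs(dist) < 20                     # "near" threshold in pixels (arbitrary)
print(inside, near_border)
```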

173
tracking/test_merge.py Normal file
View File

@ -0,0 +1,173 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 11:04:48 2024
@author: ym
"""
import numpy as np
import cv2
from scipy.spatial.distance import cdist
# from trackers.utils import matching
def readDict(boxes, feat_dicts):
feat = []
for i in range(boxes.shape[0]):
tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])
feat.append(feat_dicts[fid][bid])
# img = feat_dicts[fid][f'{bid}_img']
# cv2.imwrite(f'./result/imgs/{tid}_{fid}_{bid}.png', img)
return np.asarray(feat, dtype=np.float32)
def track_equal_track(atrack, btrack, feat_dicts):
# boxes: [x, y, w, h, track_id, score, cls, frame_index, box_index]
aboxes = atrack.boxes
bboxes = btrack.boxes
    ''' 1. Check whether the two tracks overlap in time '''
afids = aboxes[:, 7].astype(np.int_)
bfids = bboxes[:, 7].astype(np.int_)
    # intersection of the two tracks' frame indices
interfid = set(afids).intersection(set(bfids))
    # alternatively, set.isdisjoint returns True when the frame indices do NOT overlap:
    # interfid = set(afids).isdisjoint(set(bfids))
    ''' 2. Spatial IoU between the two tracks at adjacent frames '''
alabel = np.array([0] * afids.size, dtype=np.int_)
blabel = np.array([1] * bfids.size, dtype=np.int_)
label = np.concatenate((alabel, blabel), axis=0)
fids = np.concatenate((afids, bfids), axis=0)
indices = np.argsort(fids)
idx_pair = []
for i in range(len(indices)-1):
idx1, idx2 = indices[i], indices[i+1]
if label[idx1] != label[idx2] and fids[idx2] - fids[idx1] == 1:
if label[idx1] == 0:
a_idx = idx1
b_idx = idx2-alabel.size
else:
a_idx = idx2
b_idx = idx1-alabel.size
idx_pair.append((a_idx, b_idx))
ious = []
for a, b in idx_pair:
abox, bbox = aboxes[a, :], bboxes[b, :]
xa1, ya1 = abox[0] - abox[2]/2, abox[1] - abox[3]/2
xa2, ya2 = abox[0] + abox[2]/2, abox[1] + abox[3]/2
xb1, yb1 = bbox[0] - bbox[2]/2, bbox[1] - bbox[3]/2
xb2, yb2 = bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2
inter = (np.minimum(xb2, xa2) - np.maximum(xb1, xa1)).clip(0) * \
(np.minimum(yb2, ya2) - np.maximum(yb1, ya1)).clip(0)
# Union Area
box1_area = abox[2] * abox[3]
box2_area = bbox[2] * bbox[3]
union = box1_area + box2_area - inter + 1e-6
ious.append(inter/union)
    ''' 3. Feature-similarity check between the two tracks '''
afeat = readDict(aboxes, feat_dicts)
bfeat = readDict(bboxes, feat_dicts)
feat = np.concatenate((afeat, bfeat), axis=0)
emb_simil = 1-np.maximum(0.0, cdist(feat, feat, 'cosine'))
emb_ = 1-cdist(np.mean(afeat, axis=0)[None, :], np.mean(bfeat, axis=0)[None, :], 'cosine')
    cont1 = len(interfid) == 0              # the two tracks must not overlap in time
    cont2 = all(iou > 0.5 for iou in ious)  # adjacent-frame boxes overlap spatially
    cont3 = emb_[0, 0] > 0.75               # mean-feature cosine similarity is high
    cont = cont1 and cont2 and cont3
return cont
def track_equal_str(atrack, btrack):
if atrack == btrack:
return True
else:
return False
def merge_track(Residual):
out_list = []
alist = [t for t in Residual]
while alist:
atrack = alist[0]
cur_list = []
cur_list.append(atrack)
alist.pop(0)
blist = [b for b in alist]
alist = []
for btrack in blist:
if track_equal_str(atrack, btrack):
cur_list.append(btrack)
else:
alist.append(btrack)
out_list.append(cur_list)
return out_list
def main():
Residual = ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'b', 'c', 'd']
out_list = merge_track(Residual)
print(Residual)
print(out_list)
if __name__ == "__main__":
main()
# =============================================================================
# for i, atrack in enumerate(input_list):
# cur_list = []
# cur_list.append(atrack)
# del input_list[i]
#
# for j, btrack in enumerate(input_list):
# if track_equal(atrack, btrack):
# cur_list.append(btrack)
# del input_list[j]
#
# out_list.append(cur_list)
# =============================================================================

149
tracking/test_tracking.py Normal file
View File

@ -0,0 +1,149 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 17:33:00 2023
@author: ym
"""
import cv2
import os
import numpy as np
import time
import pickle
import matplotlib.pyplot as plt
import pandas as pd
from scipy.spatial.distance import cdist
from pathlib import Path
# ================= using for import ultralytics
import sys
sys.path.append(r"D:\yolov5track")
from utils.gen import Profile
from dotrack.dotracks_back import doBackTracks
from dotrack.dotracks_front import doFrontTracks
from utils.drawtracks import draw5points, drawTrack, drawtracefeat, plot_frameID_y2, drawFeatures, draw_all_trajectories
# from datetime import datetime
# from utils.proBoxes import boxes_add_fid
# from utils.plotting import boxing_img #, Annotator, colors,
# from utils import Boxes, IterableSimpleNamespace, yaml_load
# from trackers import BOTSORT, BYTETracker
# from utils.mergetrack import track_equal_track
# from utils.basetrack import MoveState, ShoppingCart, doTracks
def detect_start_end(bboxes, features_dict, filename):
    boxes = np.empty(shape=(0, 9), dtype=np.float64)  # np.float was removed in recent NumPy versions
if filename.find("back") >= 0:
vts = doBackTracks(bboxes, features_dict)
vtx = [t for t in vts if t.cls != 0]
for track in vtx:
if track.moving_index.size:
boxes = np.concatenate((boxes, track.moving_index), axis=0)
elif filename.find("front") >= 0:
vts = doFrontTracks(bboxes, features_dict)
vtx = [t for t in vts if t.cls != 0]
for track in vtx:
for start, end in track.dynamic_y2:
boxes = np.concatenate((boxes, track.boxes[start:end+1, :]), axis=0)
for start, end in track.dynamic_y1:
boxes = np.concatenate((boxes, track.boxes[start:end+1, :]), axis=0)
    # column 7 is the frame index (boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index])
    start = np.min(boxes[:, 7])
    end = np.max(boxes[:, 7])
if start > 5:
start = start - 5
else:
start = 0
return start, end
def save_subimgs(vts, file):
imgdir = Path(f'./result/imgs/{file}')
if not imgdir.exists():
imgdir.mkdir(parents=True, exist_ok=True)
for i, track in enumerate(vts.Residual):
boxes = track.boxes
for ii in range(len(boxes)):
tid, fid, bid = int(boxes[ii, 4]), int(boxes[ii, 7]), int(boxes[ii, 8])
img = vts.TracksDict[f"frame_{fid}"]["imgs"][bid]
# feat = TracksDict[f"frame_{fid}"]["feats"][bid]
# box = TracksDict[f"frame_{fid}"]["boxes"][bid]
cv2.imwrite(str(imgdir) + f"/{tid}_{fid}_{bid}.png", img)
def have_tracked():
trackdict = r'./data/trackdicts'
alltracks = []
k = 0
gt = Profile()
for filename in os.listdir(trackdict):
filename = 'test_20240402-173935_6920152400975_front_174037379.pkl'
file, ext = os.path.splitext(filename)
filepath = os.path.join(trackdict, filename)
TracksDict = np.load(filepath, allow_pickle=True)
bboxes = TracksDict['TrackBoxes']
with gt:
if filename.find("front") >= 0:
vts = doFrontTracks(bboxes, TracksDict)
vts.classify()
save_subimgs(vts, file)
plt = plot_frameID_y2(vts)
savedir = save_dir.joinpath(f'{file}_y2.png')
plt.savefig(savedir)
plt.close()
else:
vts = doBackTracks(bboxes, TracksDict)
vts.classify()
alltracks.append(vts)
save_subimgs(vts, file)
edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
draw_all_trajectories(vts, edgeline, save_dir, filename)
print(file+f" need time: {gt.dt:.2f}s")
# k += 1
# if k==1:
# break
if len(alltracks):
drawFeatures(alltracks, save_dir)
if __name__ == "__main__":
# now = datetime.now()
# time_string = now.strftime("%Y%m%d%H%M%S")[:8]
save_dir = Path('./result/tracks')
if not save_dir.exists():
save_dir.mkdir(parents=True, exist_ok=True)
have_tracked()

View File

@ -0,0 +1,94 @@
# Tracker
## Supported Trackers
- [x] ByteTracker
- [x] BoT-SORT
## Usage
### python interface:
You can use the Python interface to track objects using the YOLO model.
```python
from ultralytics import YOLO
model = YOLO("yolov8n.pt") # or a segmentation model .i.e yolov8n-seg.pt
model.track(
source="video/streams",
stream=True,
tracker="botsort.yaml", # or 'bytetrack.yaml'
show=True,
)
```
You can get the IDs of the tracked objects using the following code:
```python
from ultralytics import YOLO
model = YOLO("yolov8n.pt")
for result in model.track(source="video.mp4"):
print(
result.boxes.id.cpu().numpy().astype(int)
) # this will print the IDs of the tracked objects in the frame
```
If you want to use the tracker with a folder of images, or when you loop over video frames yourself, use the `persist` parameter to tell the model that consecutive frames belong to the same sequence, so IDs stay fixed for the same objects. Otherwise each call creates a new tracking object and the IDs change from frame to frame.
```python
import cv2
from ultralytics import YOLO
cap = cv2.VideoCapture("video.mp4")
model = YOLO("yolov8n.pt")
while True:
ret, frame = cap.read()
if not ret:
break
results = model.track(frame, persist=True)
boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
ids = results[0].boxes.id.cpu().numpy().astype(int)
for box, id in zip(boxes, ids):
cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)
cv2.putText(
frame,
f"Id {id}",
(box[0], box[1]),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 255),
2,
)
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
```
## Change tracker parameters
You can change the tracker parameters by editing the tracker configuration YAML files (e.g. `botsort.yaml` or `bytetrack.yaml`) located in the `ultralytics/cfg/trackers` folder.
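For example, one way to apply custom parameters is to copy a default config, edit it, and point the `tracker` argument at the copy (the filename below is illustrative):
```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# "my_botsort.yaml" is a local copy of ultralytics/cfg/trackers/botsort.yaml
# with e.g. track_high_thresh or match_thresh adjusted.
model.track(source="video.mp4", tracker="my_botsort.yaml", show=True)
```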
## Command Line Interface (CLI)
You can also use the command line interface to track objects using the YOLO model.
```bash
yolo detect track source=... tracker=...
yolo segment track source=... tracker=...
yolo pose track source=... tracker=...
```
By default, trackers will use the configuration in `ultralytics/cfg/trackers`. We also support using a modified tracker config file. Please refer to the tracker config files in `ultralytics/cfg/trackers`.
## Contribute to Our Trackers Section
Are you proficient in multi-object tracking and have successfully implemented or adapted a tracking algorithm with Ultralytics YOLO? We invite you to contribute to our Trackers section! Your real-world applications and solutions could be invaluable for users working on tracking tasks.
By contributing to this section, you help expand the scope of tracking solutions available within the Ultralytics YOLO framework, adding another layer of functionality and utility for the community.
To initiate your contribution, please refer to our [Contributing Guide](https://docs.ultralytics.com/help/contributing) for comprehensive instructions on submitting a Pull Request (PR) 🛠️. We are excited to see what you bring to the table!
Together, let's enhance the tracking capabilities of the Ultralytics YOLO ecosystem 🙏!

View File

@ -0,0 +1,10 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from .bot_sort import BOTSORT
from .byte_tracker import BYTETracker
from .track import register_tracker
__all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,71 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import OrderedDict
import numpy as np
class TrackState:
"""Enumeration of possible object tracking states."""
New = 0
Tracked = 1
Lost = 2
Removed = 3
class BaseTrack:
"""Base class for object tracking, handling basic track attributes and operations."""
_count = 0
track_id = 0
is_activated = False
state = TrackState.New
history = OrderedDict()
features = []
curr_feature = None
score = 0
start_frame = 0
frame_id = 0
time_since_update = 0
# Multi-camera
location = (np.inf, np.inf)
@property
def end_frame(self):
"""Return the last frame ID of the track."""
return self.frame_id
@staticmethod
def next_id():
"""Increment and return the global track ID counter."""
BaseTrack._count += 1
return BaseTrack._count
def activate(self, *args):
"""Activate the track with the provided arguments."""
raise NotImplementedError
def predict(self):
"""Predict the next state of the track."""
raise NotImplementedError
def update(self, *args, **kwargs):
"""Update the track with new observations."""
raise NotImplementedError
def mark_lost(self):
"""Mark the track as lost."""
self.state = TrackState.Lost
def mark_removed(self):
"""Mark the track as removed."""
self.state = TrackState.Removed
@staticmethod
def reset_id():
"""Reset the global track ID counter."""
BaseTrack._count = 0

View File

@ -0,0 +1,198 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from collections import deque
import numpy as np
from .basetrack import TrackState
from .byte_tracker import BYTETracker, STrack
from .utils import matching
# from .utils.gmc import GMC
from .utils.kalman_filter import KalmanFilterXYWH
from .reid.reid_interface import ReIDInterface
from .reid.config import config
class BOTrack(STrack):
shared_kalman = KalmanFilterXYWH()
def __init__(self, tlwh, score, cls, feat=None, feat_history=50):
"""Initialize YOLOv8 object with temporal parameters, such as feature history, alpha and current features."""
super().__init__(tlwh, score, cls)
self.smooth_feat = None
self.curr_feat = None
if feat is not None:
self.update_features(feat)
self.features = deque([], maxlen=feat_history)
self.alpha = 0.9
def update_features(self, feat):
"""Update features vector and smooth it using exponential moving average."""
feat /= np.linalg.norm(feat)
self.curr_feat = feat
if self.smooth_feat is None:
self.smooth_feat = feat
else:
self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat
self.features.append(feat)
self.smooth_feat /= np.linalg.norm(self.smooth_feat)
def predict(self):
"""Predicts the mean and covariance using Kalman filter."""
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[6] = 0
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
def re_activate(self, new_track, frame_id, new_id=False):
"""Reactivates a track with updated features and optionally assigns a new ID."""
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
super().re_activate(new_track, frame_id, new_id)
def update(self, new_track, frame_id):
"""Update the YOLOv8 instance with new track and frame ID."""
if new_track.curr_feat is not None:
self.update_features(new_track.curr_feat)
super().update(new_track, frame_id)
@property
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[:2] -= ret[2:] / 2
return ret
@staticmethod
def multi_predict(stracks):
"""Predicts the mean and covariance of multiple object tracks using shared Kalman filter."""
if len(stracks) <= 0:
return
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][6] = 0
multi_mean[i][7] = 0
multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
def convert_coords(self, tlwh):
"""Converts Top-Left-Width-Height bounding box coordinates to X-Y-Width-Height format."""
return self.tlwh_to_xywh(tlwh)
@staticmethod
def tlwh_to_xywh(tlwh):
"""Convert bounding box to format `(center x, center y, width,
height)`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
return ret
class BOTSORT(BYTETracker):
def __init__(self, args, frame_rate=30):
"""Initialize YOLOv8 object with ReID module and GMC algorithm."""
super().__init__(args, frame_rate)
# ReID module
self.proximity_thresh = args.proximity_thresh
self.appearance_thresh = args.appearance_thresh
if args.with_reid:
# Haven't supported BoT-SORT(reid) yet
self.encoder = ReIDInterface(config)
# self.gmc = GMC(method=args.gmc_method) # commented by WQG
def get_kalmanfilter(self):
"""Returns an instance of KalmanFilterXYWH for object tracking."""
return KalmanFilterXYWH()
def init_track(self, dets, scores, cls, imgs):
"""Initialize track with detections, scores, and classes."""
if len(dets) == 0:
return []
if self.args.with_reid and self.encoder is not None:
features_keep = self.encoder.inference(imgs, dets)
return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
else:
return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections
def get_dists(self, tracks, detections):
"""Get distances between tracks and detections using IoU and (optionally) ReID embeddings."""
dists = matching.iou_distance(tracks, detections)
        # proximity_thresh should be set fairly large: ReID features are ignored only when the two boxes are far apart
dists_mask = (dists > self.proximity_thresh)
# TODO: mot20
# if not self.args.mot20:
dists = matching.fuse_score(dists, detections)
if self.args.with_reid and self.encoder is not None:
emb_dists = matching.embedding_distance(tracks, detections) / 2.0
emb_dists[emb_dists > self.appearance_thresh] = 1.0
emb_dists[dists_mask] = 1.0
dists = np.minimum(dists, emb_dists)
return dists
def get_dists_1(self, tracks, detections):
"""Get distances between tracks and detections using IoU and (optionally) ReID embeddings."""
iou_dists = matching.iou_distance(tracks, detections)
iou_dists_mask = (iou_dists>0.9)
iou_dists = matching.fuse_score(iou_dists, detections)
weight = 0.4
if self.args.with_reid and self.encoder is not None:
emb_dists = matching.embedding_distance(tracks, detections)
            '''============ two strategies for fusing iou_dists and emb_dists ==========='''
            '''1. ReID similarity threshold: two boxes whose similarity falls below it cannot be the
                  same object, so a reasonable, trustworthy threshold has to be chosen.
               2. The IoU term is only a weak constraint, so iou_dists should be kept relatively large.
            '''
emb_dists_mask = (emb_dists > 0.85)
iou_dists[emb_dists_mask] = 1
emb_dists[iou_dists_mask] = 1
dists = np.minimum(iou_dists, emb_dists)
            '''2. weighted fusion of iou_dists and emb_dists (alternative, not used)'''
# dists = (1-weight)*iou_dists + weight*emb_dists
else:
dists = iou_dists.copy()
return dists
def multi_predict(self, tracks):
"""Predict and track multiple objects with YOLOv8 model."""
BOTrack.multi_predict(tracks)
def get_result(self):
'''written by WQG'''
activate_tracks = np.asarray([x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx]
for x in self.tracked_stracks if x.is_activated], dtype=np.float32)
track_features = []
if self.args.with_reid and self.encoder is not None:
track_features = np.asarray([x.curr_feat for x in self.tracked_stracks if x.is_activated], dtype=np.float32)
return (activate_tracks, track_features)

View File

@ -0,0 +1,464 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import numpy as np
from .basetrack import BaseTrack, TrackState
from .utils import matching
from .utils.kalman_filter import KalmanFilterXYAH
def dists_update(dists, strack_pool, detections):
'''written by WQG'''
if len(strack_pool) and len(detections):
# alabel = np.array([int(stack.cls) if int(stack.cls)==0 or int(stack.cls)==9 else -1 for stack in strack_pool])
# blabel = np.array([int(stack.cls) if int(stack.cls)==0 or int(stack.cls)==9 else -1 for stack in detections])
alabel = np.array([int(stack.cls) for stack in strack_pool])
blabel = np.array([int(stack.cls) for stack in detections])
amlabel = np.expand_dims(alabel, axis=1).repeat(len(detections),axis=1)
bmlabel = np.expand_dims(blabel, axis=0).repeat(len(strack_pool),axis=0)
dist_label = 1 - (bmlabel == amlabel)
dists = np.where(dists > dist_label, dists, dist_label)
return dists
class STrack(BaseTrack):
shared_kalman = KalmanFilterXYAH()
def __init__(self, tlwh, score, cls):
"""wait activate."""
self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32)
self.kalman_filter = None
self.mean, self.covariance = None, None
self.is_activated = False
self.first_find = False ###
self.score = score
self.tracklet_len = 0
self.cls = cls
self.idx = tlwh[-1]
def predict(self):
"""Predicts mean and covariance using Kalman filter."""
mean_state = self.mean.copy()
if self.state != TrackState.Tracked:
mean_state[7] = 0
self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
@staticmethod
def multi_predict(stracks):
"""Perform multi-object predictive tracking using Kalman filter for given stracks."""
if len(stracks) <= 0:
return
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
for i, st in enumerate(stracks):
if st.state != TrackState.Tracked:
multi_mean[i][7] = 0
multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
stracks[i].mean = mean
stracks[i].covariance = cov
@staticmethod
def multi_gmc(stracks, H=np.eye(2, 3)):
"""Update state tracks positions and covariances using a homography matrix."""
if len(stracks) > 0:
multi_mean = np.asarray([st.mean.copy() for st in stracks])
multi_covariance = np.asarray([st.covariance for st in stracks])
R = H[:2, :2]
R8x8 = np.kron(np.eye(4, dtype=float), R)
t = H[:2, 2]
for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
mean = R8x8.dot(mean)
mean[:2] += t
cov = R8x8.dot(cov).dot(R8x8.transpose())
stracks[i].mean = mean
stracks[i].covariance = cov
def activate(self, kalman_filter, frame_id):
"""Start a new tracklet."""
self.kalman_filter = kalman_filter
self.track_id = self.next_id()
self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
if frame_id == 1:
self.is_activated = True
else:
self.first_find = True ### Add by WQG
self.frame_id = frame_id
self.start_frame = frame_id
def re_activate(self, new_track, frame_id, new_id=False):
"""Reactivates a previously lost track with a new detection."""
self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,
self.convert_coords(new_track.tlwh))
self.tracklet_len = 0
self.state = TrackState.Tracked
self.is_activated = True
self.frame_id = frame_id
if new_id:
self.track_id = self.next_id()
self.score = new_track.score
self.cls = new_track.cls
self.idx = new_track.idx
self._tlwh = new_track._tlwh
def update(self, new_track, frame_id):
"""
Update a matched track
:type new_track: STrack
:type frame_id: int
:return:
"""
self.frame_id = frame_id
self.tracklet_len += 1
new_tlwh = new_track.tlwh
self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,
self.convert_coords(new_tlwh))
self.state = TrackState.Tracked
self.is_activated = True
self.score = new_track.score
self.cls = new_track.cls
self.idx = new_track.idx
self._tlwh = new_track._tlwh
def convert_coords(self, tlwh):
"""Convert a bounding box's top-left-width-height format to its x-y-angle-height equivalent."""
return self.tlwh_to_xyah(tlwh)
@property
def tlwh(self):
"""Get current position in bounding box format `(top left x, top left y,
width, height)`.
"""
if self.mean is None:
return self._tlwh.copy()
ret = self.mean[:4].copy()
ret[2] *= ret[3]
ret[:2] -= ret[2:] / 2
return ret
@property
def tlbr(self):
"""Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
`(top left, bottom right)`.
"""
ret = self.tlwh.copy()
ret[2:] += ret[:2]
return ret
@staticmethod
def tlwh_to_xyah(tlwh):
"""Convert bounding box to format `(center x, center y, aspect ratio,
height)`, where the aspect ratio is `width / height`.
"""
ret = np.asarray(tlwh).copy()
ret[:2] += ret[2:] / 2
ret[2] /= ret[3]
return ret
@staticmethod
def tlbr_to_tlwh(tlbr):
"""Converts top-left bottom-right format to top-left width height format."""
ret = np.asarray(tlbr).copy()
ret[2:] -= ret[:2]
return ret
@staticmethod
def tlwh_to_tlbr(tlwh):
"""Converts tlwh bounding box format to tlbr format."""
ret = np.asarray(tlwh).copy()
ret[2:] += ret[:2]
return ret
def __repr__(self):
"""Return a string representation of the BYTETracker object with start and end frames and track ID."""
return f'OT_{self.track_id}_({self.start_frame}-{self.end_frame})'
class BYTETracker:
def __init__(self, args, frame_rate=30):
"""Initialize a YOLOv8 object to track objects with given arguments and frame rate."""
self.tracked_stracks = [] # type: list[STrack]
self.lost_stracks = [] # type: list[STrack]
self.removed_stracks = [] # type: list[STrack]
self.frame_id = 0
self.args = args
self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer)
self.kalman_filter = self.get_kalmanfilter()
self.reset_id()
# Add by WQG
self.args.new_track_thresh = 0.5
def update(self, results, img=None):
"""Updates object tracker with new detections and returns tracked object bounding boxes."""
self.frame_id += 1
activated_stracks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
scores = results.conf
cls = results.cls
# =============================================================================
# # get xyxy and add index
# bboxes = results.xyxy
# bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
# =============================================================================
bboxes = results.xyxyb
remain_inds = scores > self.args.track_high_thresh
inds_low = scores > self.args.track_low_thresh
inds_high = scores < self.args.track_high_thresh
inds_second = np.logical_and(inds_low, inds_high)
dets_second = bboxes[inds_second]
dets = bboxes[remain_inds]
scores_keep = scores[remain_inds]
scores_second = scores[inds_second]
cls_keep = cls[remain_inds]
cls_second = cls[inds_second]
detections = self.init_track(dets, scores_keep, cls_keep, img)
# Add newly detected tracklets to tracked_stracks
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
# Step 2: First association, with high score detection boxes
strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks)
# Predict the current location with KF
self.multi_predict(strack_pool)
        # ============================================================= GMC is unnecessary here (WQG)
# if hasattr(self, 'gmc') and img is not None:
# warp = self.gmc.apply(img, dets)
# STrack.multi_gmc(strack_pool, warp)
# STrack.multi_gmc(unconfirmed, warp)
# =============================================================================
dists = self.get_dists_1(strack_pool, detections)
'''written by WQG for different class'''
dists = dists_update(dists, strack_pool, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_stracks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
# Step 3: Second association, with low score detection boxes
# association the untrack to the low score detections
detections_second = self.init_track(dets_second, scores_second, cls_second, img)
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
# TODO
dists = matching.iou_distance(r_tracked_stracks, detections_second)
'''written by WQG for different class'''
dists = dists_update(dists, r_tracked_stracks, detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id)
activated_stracks.append(track)
else:
track.re_activate(det, self.frame_id, new_id=False)
refind_stracks.append(track)
for it in u_track:
track = r_tracked_stracks[it]
if track.state != TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
# Deal with unconfirmed tracks, usually tracks with only one beginning frame
detections = [detections[i] for i in u_detection]
dists = self.get_dists_1(unconfirmed, detections)
'''written by WQG for different class'''
dists = dists_update(dists, unconfirmed, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id)
activated_stracks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
if self.frame_id - track.end_frame > 2: # Add by WQG
track.mark_removed()
removed_stracks.append(track)
# Step 4: Init new stracks
for inew in u_detection:
track = detections[inew]
if track.score < self.args.new_track_thresh:
continue
track.activate(self.kalman_filter, self.frame_id)
activated_stracks.append(track)
# Step 5: Update state
for track in self.lost_stracks:
if self.frame_id - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks)
self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks)
self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks)
self.lost_stracks.extend(lost_stracks)
self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks)
self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
self.removed_stracks.extend(removed_stracks)
if len(self.removed_stracks) > 1000:
self.removed_stracks = self.removed_stracks[-999:] # clip remove stracks to 1000 maximum
        '''x.tlbr has already been updated via the tlwh property.'''
        ## ================ original algorithm output
# output = np.asarray([x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.frame_id, x.idx]
# for x in self.tracked_stracks if x.is_activated], dtype=np.float32)
        ## ===== written by WQG
output1 = [x.tlwh_to_tlbr(x._tlwh).tolist() + [x.track_id, x.score, x.cls, x.frame_id, x.idx]
for x in self.tracked_stracks if x.is_activated]
output2 = [x.tlwh_to_tlbr(x._tlwh).tolist() + [x.track_id, x.score, x.cls, x.frame_id, x.idx]
for x in activated_stracks if x.first_find]
output = np.asarray(output1+output2, dtype=np.float32)
return output
def get_result(self):
'''written by WQG'''
# =============================================================================
# activate_tracks = np.asarray([x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx]
# for x in self.tracked_stracks if x.is_activated], dtype=np.float32)
#
# track_features = []
# =============================================================================
tracks = []
feats = []
for t in self.tracked_stracks:
if t.is_activated:
track = t.tlbr.tolist() + [t.track_id, t.score, t.cls, t.idx]
feat = t.curr_feature
tracks.append(track)
feats.append(feat)
tracks = np.asarray(tracks, dtype=np.float32)
return (tracks, feats)
def get_kalmanfilter(self):
"""Returns a Kalman filter object for tracking bounding boxes."""
return KalmanFilterXYAH()
def init_track(self, dets, scores, cls, img=None):
"""Initialize object tracking with detections and scores using STrack algorithm."""
return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections
def get_dists(self, tracks, detections):
"""Calculates the distance between tracks and detections using IOU and fuses scores."""
dists = matching.iou_distance(tracks, detections)
# TODO: mot20
# if not self.args.mot20:
dists = matching.fuse_score(dists, detections)
return dists
def get_dists_1(self, tracks, detections):
"""Calculates the distance between tracks and detections using IOU and fuses scores."""
pass
def multi_predict(self, tracks):
"""Returns the predicted tracks using the YOLOv8 network."""
STrack.multi_predict(tracks)
def reset_id(self):
"""Resets the ID counter of STrack."""
STrack.reset_id()
@staticmethod
def joint_stracks(tlista, tlistb):
"""Combine two lists of stracks into a single one."""
exists = {}
res = []
for t in tlista:
exists[t.track_id] = 1
res.append(t)
for t in tlistb:
tid = t.track_id
if not exists.get(tid, 0):
exists[tid] = 1
res.append(t)
return res
@staticmethod
def sub_stracks(tlista, tlistb):
"""DEPRECATED CODE in https://github.com/ultralytics/ultralytics/pull/1890/
stracks = {t.track_id: t for t in tlista}
for t in tlistb:
tid = t.track_id
if stracks.get(tid, 0):
del stracks[tid]
return list(stracks.values())
"""
track_ids_b = {t.track_id for t in tlistb}
return [t for t in tlista if t.track_id not in track_ids_b]
@staticmethod
def remove_duplicate_stracks(stracksa, stracksb):
"""Remove duplicate stracks with non-maximum IOU distance."""
pdist = matching.iou_distance(stracksa, stracksb)
pairs = np.where(pdist < 0.15)
dupa, dupb = [], []
for p, q in zip(*pairs):
timep = stracksa[p].frame_id - stracksa[p].start_frame
timeq = stracksb[q].frame_id - stracksb[q].start_frame
if timep > timeq:
dupb.append(q)
else:
dupa.append(p)
resa = [t for i, t in enumerate(stracksa) if i not in dupa]
resb = [t for i, t in enumerate(stracksb) if i not in dupb]
return resa, resb

View File

@ -0,0 +1,18 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT
tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
track_high_thresh: 0.5 # threshold for the first association
track_low_thresh: 0.1 # threshold for the second association
new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks
track_buffer: 30 # buffer to calculate the time when to remove tracks
match_thresh: 0.8 # threshold for matching tracks
# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
# mot20: False # for tracker evaluation(not used for now)
# BoT-SORT settings
gmc_method: sparseOptFlow # method of global motion compensation
# ReID model related thresh (not supported yet)
proximity_thresh: 0.5
appearance_thresh: 0.25
with_reid: True
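A minimal sketch, purely as an assumption about standalone use outside the Ultralytics pipeline, of how these fields become the `args` object the trackers read (`cfg.track_high_thresh`, `cfg.with_reid`, ...); it is not the library's own loading code:
```python
from types import SimpleNamespace
import yaml

with open("botsort.yaml") as f:
    cfg = SimpleNamespace(**yaml.safe_load(f))   # cfg.track_high_thresh, cfg.match_thresh, ...

# from .bot_sort import BOTSORT                  # this repo's tracker class (assumed import path)
# tracker = BOTSORT(cfg, frame_rate=30)          # BYTETracker/BOTSORT read cfg.* attributes
```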

View File

@ -0,0 +1,11 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack
tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
track_high_thresh: 0.5 # threshold for the first association
track_low_thresh: 0.1 # threshold for the second association
new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks
track_buffer: 30 # buffer to calculate the time when to remove tracks
match_thresh: 0.8 # threshold for matching tracks
# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
# mot20: False # for tracker evaluation(not used for now)
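
One way such a YAML file can be loaded into the args namespace the tracker reads its thresholds from (a minimal sketch assuming PyYAML is available; the real pipeline goes through Ultralytics' own config loader):

import yaml
from types import SimpleNamespace

with open("bytetrack.yaml", "r", encoding="utf-8") as f:
    cfg = yaml.safe_load(f)   # comments are dropped, keys become a plain dict
args = SimpleNamespace(**cfg)
print(args.tracker_type, args.track_high_thresh, args.match_thresh)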

View File

@ -0,0 +1,7 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 16:15:35 2024
@author: ym
"""

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,42 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 14:01:46 2024
@author: ym
"""
import torch
import os
# import torchvision.transforms as T
class Config:
# network settings
backbone = 'resnet18' # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3]
batch_size = 8
embedding_size = 256
img_size = 224
current_path = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(current_path, "ckpts", "resnet18_1220", "best.pth")  # portable path separators
# model_path = "./trackers/reid/ckpts/resnet18_1220/best.pth"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# =============================================================================
# metric = 'arcface' # [cosface, arcface]
# drop_ratio = 0.5
#
# # training settings
# checkpoints = "checkpoints/Mobilev3Large_1225" # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3]
# restore = False
#
# test_model = "./checkpoints/resnet18_1220/best.pth"
#
#
#
#
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# pin_memory = True # if memory is large, set it True to speed up a bit
# num_workers = 4 # dataloader
# =============================================================================
config = Config()

View File

@ -0,0 +1,83 @@
import torch.nn as nn
import torchvision
from torch.nn import init
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
class ChannelAttention(nn.Module):
    def __init__(self, channel, reduction=16, num_layers=3):
        super(ChannelAttention, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        gate_channels = [channel]
        gate_channels += [channel // reduction] * num_layers
        gate_channels += [channel]
        self.ca = nn.Sequential()
        self.ca.add_module('flatten', Flatten())
        for i in range(len(gate_channels) - 2):
            # unique sub-module names; add_module('') would keep overwriting the same entry
            self.ca.add_module('fc%d' % i, nn.Linear(gate_channels[i], gate_channels[i + 1]))
            self.ca.add_module('bn%d' % i, nn.BatchNorm1d(gate_channels[i + 1]))
            self.ca.add_module('relu%d' % i, nn.ReLU())
        self.ca.add_module('last_fc', nn.Linear(gate_channels[-2], gate_channels[-1]))
    def forward(self, x):
        res = self.avgpool(x)
        res = self.ca(res)
        res = res.unsqueeze(-1).unsqueeze(-1).expand_as(x)
        return res
class SpatialAttention(nn.Module):
    def __init__(self, channel, reduction=16, num_lay=3, dilation=2):
        super(SpatialAttention, self).__init__()
        self.sa = nn.Sequential()
        # 1x1 conv reduces the channel dimension before the dilated convs
        self.sa.add_module('conv_reduce', nn.Conv2d(kernel_size=1, in_channels=channel, out_channels=channel // reduction))
        self.sa.add_module('bn_reduce', nn.BatchNorm2d(num_features=channel // reduction))
        self.sa.add_module('relu_reduce', nn.ReLU())
        for i in range(num_lay):
            # dilated 3x3 convs; padding=dilation keeps the spatial size unchanged
            self.sa.add_module('conv%d' % i, nn.Conv2d(kernel_size=3,
                                                       in_channels=channel // reduction,
                                                       out_channels=channel // reduction,
                                                       padding=dilation,
                                                       dilation=dilation))
            self.sa.add_module('bn%d' % i, nn.BatchNorm2d(channel // reduction))
            self.sa.add_module('relu%d' % i, nn.ReLU())
        self.sa.add_module('conv_out', nn.Conv2d(channel // reduction, 1, kernel_size=1))
    def forward(self, x):
        res = self.sa(x)
        res = res.expand_as(x)
        return res
class BAMblock(nn.Module):
    def __init__(self, channel=512, reduction=16, dia_val=2):
        super(BAMblock, self).__init__()
        self.ca = ChannelAttention(channel, reduction)
        self.sa = SpatialAttention(channel, reduction, dilation=dia_val)
        self.sigmoid = nn.Sigmoid()
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
    def forward(self, x):
        b, c, _, _ = x.size()
        sa_out = self.sa(x)
        ca_out = self.ca(x)
        weight = self.sigmoid(sa_out + ca_out)
        out = (1 + weight) * x
        return out
if __name__ =="__main__":
print(512//14)
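
A quick shape check for the block above (the 512-channel, 7x7 input is an arbitrary choice for illustration):

import torch
bam = BAMblock(channel=512, reduction=16, dia_val=2)
x = torch.randn(4, 512, 7, 7)
print(bam(x).shape)   # expected: torch.Size([4, 512, 7, 7])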

View File

@ -0,0 +1,68 @@
import torch
import torch.nn as nn
import torch.nn.init as init
class channelAttention(nn.Module):
def __init__(self, channel, reduction=16):
super(channelAttention, self).__init__()
self.Maxpooling = nn.AdaptiveMaxPool2d(1)
self.Avepooling = nn.AdaptiveAvgPool2d(1)
self.ca = nn.Sequential()
self.ca.add_module('conv1',nn.Conv2d(channel, channel//reduction, 1, bias=False))
self.ca.add_module('Relu', nn.ReLU())
self.ca.add_module('conv2',nn.Conv2d(channel//reduction, channel, 1, bias=False))
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        M_out = self.Maxpooling(x)
        A_out = self.Avepooling(x)
        M_out = self.ca(M_out)
        A_out = self.ca(A_out)
        out = self.sigmoid(M_out + A_out)
return out
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super().__init__()
self.conv = nn.Conv2d(in_channels=2, out_channels=1, kernel_size=kernel_size, padding=kernel_size // 2)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
max_result, _ = torch.max(x, dim=1, keepdim=True)
avg_result = torch.mean(x, dim=1, keepdim=True)
result = torch.cat([max_result, avg_result], dim=1)
output = self.conv(result)
output = self.sigmoid(output)
return output
class CBAM(nn.Module):
def __init__(self, channel=512, reduction=16, kernel_size=7):
super().__init__()
self.ca = channelAttention(channel, reduction)
self.sa = SpatialAttention(kernel_size)
def init_weights(self):
        for m in self.modules():  # weight initialization
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
# b,c_,_ = x.size()
# residual = x
out = x*self.ca(x)
out = out*self.sa(out)
return out
if __name__ == '__main__':
input=torch.randn(50,512,7,7)
kernel_size=input.shape[2]
cbam = CBAM(channel=512,reduction=16,kernel_size=kernel_size)
output=cbam(input)
print(output.shape)

View File

@ -0,0 +1,33 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
class GeM(nn.Module):
def __init__(self, p=3, eps=1e-6):
super(GeM, self).__init__()
self.p = nn.Parameter(torch.ones(1) * p)
self.eps = eps
def forward(self, x):
return self.gem(x, p=self.p, eps=self.eps, stride = 2)
def gem(self, x, p=3, eps=1e-6, stride = 2):
return F.avg_pool2d(x.clamp(min=eps).pow(p), (x.size(-2), x.size(-1)), stride=2).pow(1. / p)
def __repr__(self):
return self.__class__.__name__ + \
'(' + 'p=' + '{:.4f}'.format(self.p.data.tolist()[0]) + \
', ' + 'eps=' + str(self.eps) + ')'
class TripletLoss(nn.Module):
def __init__(self, margin):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, size_average = True):
distance_positive = (anchor-positive).pow(2).sum(1)
distance_negative = (anchor-negative).pow(2).sum(1)
        losses = F.relu(distance_positive - distance_negative + self.margin)  # hinge: pull positives closer than negatives by the margin
return losses.mean() if size_average else losses.sum()
if __name__ == '__main__':
print('')
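
A short sketch of both modules on random tensors (shapes are illustrative only):

import torch
feat_map = torch.randn(2, 512, 7, 7)
pooled = GeM()(feat_map)                               # generalized-mean pooling, shape (2, 512, 1, 1)
anchor, positive, negative = torch.randn(3, 4, 256).unbind(0)
loss = TripletLoss(margin=0.3)(anchor, positive, negative)
print(pooled.shape, loss.item())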

View File

@ -0,0 +1,9 @@
from .fmobilenet import FaceMobileNet
from .resnet_face import ResIRSE
from .mobilevit import mobilevit_s
from .metric import ArcFace, CosFace
from .loss import FocalLoss
from .resbam import resnet
from .resnet_pre import resnet18, resnet34, resnet50
from .mobilenet_v2 import mobilenet_v2
from .mobilenet_v3 import MobileNetV3_Small, MobileNetV3_Large

View File

@ -0,0 +1,124 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.shape[0], -1)
class ConvBn(nn.Module):
def __init__(self, in_c, out_c, kernel=(1, 1), stride=1, padding=0, groups=1):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(in_c, out_c, kernel, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_c)
)
def forward(self, x):
return self.net(x)
class ConvBnPrelu(nn.Module):
def __init__(self, in_c, out_c, kernel=(1, 1), stride=1, padding=0, groups=1):
super().__init__()
self.net = nn.Sequential(
ConvBn(in_c, out_c, kernel, stride, padding, groups),
nn.PReLU(out_c)
)
def forward(self, x):
return self.net(x)
class DepthWise(nn.Module):
def __init__(self, in_c, out_c, kernel=(3, 3), stride=2, padding=1, groups=1):
super().__init__()
self.net = nn.Sequential(
ConvBnPrelu(in_c, groups, kernel=(1, 1), stride=1, padding=0),
ConvBnPrelu(groups, groups, kernel=kernel, stride=stride, padding=padding, groups=groups),
ConvBn(groups, out_c, kernel=(1, 1), stride=1, padding=0),
)
def forward(self, x):
return self.net(x)
class DepthWiseRes(nn.Module):
"""DepthWise with Residual"""
def __init__(self, in_c, out_c, kernel=(3, 3), stride=2, padding=1, groups=1):
super().__init__()
self.net = DepthWise(in_c, out_c, kernel, stride, padding, groups)
def forward(self, x):
return self.net(x) + x
class MultiDepthWiseRes(nn.Module):
def __init__(self, num_block, channels, kernel=(3, 3), stride=1, padding=1, groups=1):
super().__init__()
self.net = nn.Sequential(*[
DepthWiseRes(channels, channels, kernel, stride, padding, groups)
for _ in range(num_block)
])
def forward(self, x):
return self.net(x)
class FaceMobileNet(nn.Module):
def __init__(self, embedding_size):
super().__init__()
self.conv1 = ConvBnPrelu(1, 64, kernel=(3, 3), stride=2, padding=1)
self.conv2 = ConvBn(64, 64, kernel=(3, 3), stride=1, padding=1, groups=64)
self.conv3 = DepthWise(64, 64, kernel=(3, 3), stride=2, padding=1, groups=128)
self.conv4 = MultiDepthWiseRes(num_block=4, channels=64, kernel=3, stride=1, padding=1, groups=128)
self.conv5 = DepthWise(64, 128, kernel=(3, 3), stride=2, padding=1, groups=256)
self.conv6 = MultiDepthWiseRes(num_block=6, channels=128, kernel=(3, 3), stride=1, padding=1, groups=256)
self.conv7 = DepthWise(128, 128, kernel=(3, 3), stride=2, padding=1, groups=512)
self.conv8 = MultiDepthWiseRes(num_block=2, channels=128, kernel=(3, 3), stride=1, padding=1, groups=256)
self.conv9 = ConvBnPrelu(128, 512, kernel=(1, 1))
self.conv10 = ConvBn(512, 512, groups=512, kernel=(7, 7))
self.flatten = Flatten()
self.linear = nn.Linear(2048, embedding_size, bias=False)
self.bn = nn.BatchNorm1d(embedding_size)
def forward(self, x):
#print('x',x.shape)
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.conv5(out)
out = self.conv6(out)
out = self.conv7(out)
out = self.conv8(out)
out = self.conv9(out)
out = self.conv10(out)
out = self.flatten(out)
out = self.linear(out)
out = self.bn(out)
return out
if __name__ == "__main__":
from PIL import Image
import numpy as np
x = Image.open("../samples/009.jpg").convert('L')
x = x.resize((128, 128))
x = np.asarray(x, dtype=np.float32)
x = x[None, None, ...]
x = torch.from_numpy(x)
net = FaceMobileNet(512)
net.eval()
with torch.no_grad():
out = net(x)
print(out.shape)

View File

@ -0,0 +1,18 @@
import torch
import torch.nn as nn
class FocalLoss(nn.Module):
def __init__(self, gamma=2):
super().__init__()
self.gamma = gamma
        self.ce = torch.nn.CrossEntropyLoss(reduction='none')  # per-sample CE so the (1 - p)^gamma factor is applied before averaging
def forward(self, input, target):
#print(f'theta {input.shape, input[0]}, target {target.shape, target}')
logp = self.ce(input, target)
p = torch.exp(-logp)
loss = (1 - p) ** self.gamma * logp
return loss.mean()
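
A minimal sketch on random logits (sizes are arbitrary); with gamma=0 the result reduces to the plain mean cross-entropy:

import torch
logits = torch.randn(4, 10)
targets = torch.randint(0, 10, (4,))
print(FocalLoss(gamma=2)(logits, targets))   # easy, high-confidence samples are down-weighted
print(FocalLoss(gamma=0)(logits, targets))   # identical to nn.CrossEntropyLoss()(logits, targets)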

View File

@ -0,0 +1,83 @@
# Definition of ArcFace loss and CosFace loss
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class ArcFace(nn.Module):
def __init__(self, embedding_size, class_num, s=30.0, m=0.50):
"""ArcFace formula:
cos(m + theta) = cos(m)cos(theta) - sin(m)sin(theta)
Note that:
0 <= m + theta <= Pi
So if (m + theta) >= Pi, then theta >= Pi - m. In [0, Pi]
we have:
cos(theta) < cos(Pi - m)
        So we can use cos(Pi - m) as a threshold to check whether
        (m + theta) goes out of [0, Pi]
Args:
embedding_size: usually 128, 256, 512 ...
            class_num: number of classes (identities) seen during training
s: scale, see normface https://arxiv.org/abs/1704.06369
m: margin, see SphereFace, CosFace, and ArcFace paper
"""
super().__init__()
self.in_features = embedding_size
self.out_features = class_num
self.s = s
self.m = m
self.weight = nn.Parameter(torch.FloatTensor(class_num, embedding_size))
nn.init.xavier_uniform_(self.weight)
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.th = math.cos(math.pi - m)
self.mm = math.sin(math.pi - m) * m
def forward(self, input, label):
#print(f"embding {self.in_features}, class_num {self.out_features}, input {len(input)}, label {len(label)}")
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
# print('F.normalize(input)',input.shape)
# print('F.normalize(self.weight)',F.normalize(self.weight).shape)
sine = ((1.0 - cosine.pow(2)).clamp(0, 1)).sqrt()
phi = cosine * self.cos_m - sine * self.sin_m
phi = torch.where(cosine > self.th, phi, cosine - self.mm) # drop to CosFace
#print(f'consine {cosine.shape, cosine}, sine {sine.shape, sine}, phi {phi.shape, phi}')
# update y_i by phi in cosine
output = cosine * 1.0 # make backward works
batch_size = len(output)
output[range(batch_size), label] = phi[range(batch_size), label]
# print(f'output {(output * self.s).shape}')
# print(f'phi[range(batch_size), label] {phi[range(batch_size), label]}')
return output * self.s
class CosFace(nn.Module):
def __init__(self, in_features, out_features, s=30.0, m=0.40):
"""
Args:
            in_features: embedding size, usually 128, 256, 512 ...
            out_features: number of classes (identities) seen during training
s: scale, see normface https://arxiv.org/abs/1704.06369
m: margin, see SphereFace, CosFace, and ArcFace paper
"""
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
nn.init.xavier_uniform_(self.weight)
def forward(self, input, label):
cosine = F.linear(F.normalize(input), F.normalize(self.weight))
phi = cosine - self.m
output = cosine * 1.0 # make backward works
batch_size = len(output)
output[range(batch_size), label] = phi[range(batch_size), label]
return output * self.s
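
A sketch of how these margin heads are usually plugged into training: the backbone produces embeddings, the head turns them into margin-adjusted logits, and ordinary cross-entropy is applied on top (embedding size, class count and batch size below are arbitrary):

import torch
import torch.nn as nn

embeddings = torch.randn(8, 256, requires_grad=True)   # stand-in for backbone features
labels = torch.randint(0, 100, (8,))
head = ArcFace(embedding_size=256, class_num=100, s=30.0, m=0.5)
logits = head(embeddings, labels)                       # margin applied only at each sample's target class
loss = nn.CrossEntropyLoss()(logits, labels)
loss.backward()
print(loss.item())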

View File

@ -0,0 +1,200 @@
from torch import nn
from .utils import load_state_dict_from_url
from ..config import config as conf
__all__ = ['MobileNetV2', 'mobilenet_v2']
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
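# Worked examples (illustration only):
#   _make_divisible(32 * 0.75, 8) -> max(8, int(24.0 + 4) // 8 * 8) = 24  (already a multiple of 8, kept)
#   _make_divisible(17, 8)        -> max(8, int(17 + 4) // 8 * 8) = 16; 16 >= 0.9 * 17, so 16 is returned
#   _make_divisible(7, 8)         -> rounded up to min_value 8; 8 >= 0.9 * 7, so 8 is returned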
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1, norm_layer=None):
padding = (kernel_size - 1) // 2
if norm_layer is None:
norm_layer = nn.BatchNorm2d
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
norm_layer(out_planes),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, norm_layer=None):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
if norm_layer is None:
norm_layer = nn.BatchNorm2d
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
norm_layer(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self,
num_classes=conf.embedding_size,
width_mult=1.0,
inverted_residual_setting=None,
round_nearest=8,
block=None,
norm_layer=None):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
block: Module specifying inverted residual building block for mobilenet
norm_layer: Module specifying the normalization layer to use
"""
super(MobileNetV2, self).__init__()
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = nn.BatchNorm2d
input_channel = 32
last_channel = 1280
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# building first layer
input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
# building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
input_channel = output_channel
# building last several layers
features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
# make it nn.Sequential
self.features = nn.Sequential(*features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, num_classes),
)
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x):
# This exists since TorchScript doesn't support inheritance, so the superclass method
# (this one) needs to have a name other than `forward` that can be accessed in a subclass
x = self.features(x)
# Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
x = self.classifier(x)
return x
def forward(self, x):
return self._forward_impl(x)
def mobilenet_v2(pretrained=True, progress=True, **kwargs):
"""
Constructs a MobileNetV2 architecture from
`"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MobileNetV2(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
progress=progress)
src_state_dict = state_dict
target_state_dict = model.state_dict()
skip_keys = []
# skip mismatch size tensors in case of pretraining
for k in src_state_dict.keys():
if k not in target_state_dict:
continue
if src_state_dict[k].size() != target_state_dict[k].size():
skip_keys.append(k)
for k in skip_keys:
del src_state_dict[k]
missing_keys, unexpected_keys = model.load_state_dict(src_state_dict, strict=False)
#.load_state_dict(state_dict)
return model

View File

@ -0,0 +1,200 @@
'''MobileNetV3 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from ..config import config as conf
class hswish(nn.Module):
def forward(self, x):
out = x * F.relu6(x + 3, inplace=True) / 6
return out
class hsigmoid(nn.Module):
def forward(self, x):
out = F.relu6(x + 3, inplace=True) / 6
return out
class SeModule(nn.Module):
def __init__(self, in_size, reduction=4):
super(SeModule, self).__init__()
self.se = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
)
def forward(self, x):
return x * self.se(x)
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
super(Block, self).__init__()
self.stride = stride
self.se = semodule
self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn1 = nn.BatchNorm2d(expand_size)
self.nolinear1 = nolinear
self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=expand_size, bias=False)
self.bn2 = nn.BatchNorm2d(expand_size)
self.nolinear2 = nolinear
self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = nn.BatchNorm2d(out_size)
self.shortcut = nn.Sequential()
if stride == 1 and in_size != out_size:
self.shortcut = nn.Sequential(
nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(out_size),
)
def forward(self, x):
out = self.nolinear1(self.bn1(self.conv1(x)))
out = self.nolinear2(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
if self.se != None:
out = self.se(out)
out = out + self.shortcut(x) if self.stride==1 else out
return out
class MobileNetV3_Large(nn.Module):
def __init__(self, num_classes=conf.embedding_size):
super(MobileNetV3_Large, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
Block(3, 40, 240, 80, hswish(), None, 2),
Block(3, 80, 200, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 184, 80, hswish(), None, 1),
Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
Block(5, 160, 960, 160, hswish(), SeModule(160), 1),
)
self.conv2 = nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(960)
self.hs2 = hswish()
self.linear3 = nn.Linear(960, 1280)
self.bn3 = nn.BatchNorm1d(1280)
self.hs3 = hswish()
self.linear4 = nn.Linear(1280, num_classes)
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = F.avg_pool2d(out, conf.img_size // 32)
out = out.view(out.size(0), -1)
out = self.hs3(self.bn3(self.linear3(out)))
out = self.linear4(out)
return out
class MobileNetV3_Small(nn.Module):
def __init__(self, num_classes=conf.embedding_size):
super(MobileNetV3_Small, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.hs1 = hswish()
self.bneck = nn.Sequential(
Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
)
self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = nn.BatchNorm2d(576)
self.hs2 = hswish()
self.linear3 = nn.Linear(576, 1280)
self.bn3 = nn.BatchNorm1d(1280)
self.hs3 = hswish()
self.linear4 = nn.Linear(1280, num_classes)
self.init_params()
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.001)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, x):
out = self.hs1(self.bn1(self.conv1(x)))
out = self.bneck(out)
out = self.hs2(self.bn2(self.conv2(out)))
out = F.avg_pool2d(out, conf.img_size // 32)
out = out.view(out.size(0), -1)
out = self.hs3(self.bn3(self.linear3(out)))
out = self.linear4(out)
return out
def test():
net = MobileNetV3_Small()
x = torch.randn(2,3,224,224)
y = net(x)
print(y.size())
# test()

View File

@ -0,0 +1,265 @@
import torch
import torch.nn as nn
from einops import rearrange
from ..config import config as conf
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.SiLU()
)
def conv_nxn_bn(inp, oup, kernal_size=3, stride=1):
return nn.Sequential(
nn.Conv2d(inp, oup, kernal_size, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.SiLU()
)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout=0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.SiLU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads=8, dim_head=64, dropout=0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim=-1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b p n (h d) -> b p h n d', h=self.heads), qkv)
dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale
attn = self.attend(dots)
out = torch.matmul(attn, v)
out = rearrange(out, 'b p h n d -> b p n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads, dim_head, dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class MV2Block(nn.Module):
def __init__(self, inp, oup, stride=1, expansion=4):
super().__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(inp * expansion)
self.use_res_connect = self.stride == 1 and inp == oup
if expansion == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.SiLU(),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.SiLU(),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.SiLU(),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileViTBlock(nn.Module):
def __init__(self, dim, depth, channel, kernel_size, patch_size, mlp_dim, dropout=0.):
super().__init__()
self.ph, self.pw = patch_size
self.conv1 = conv_nxn_bn(channel, channel, kernel_size)
self.conv2 = conv_1x1_bn(channel, dim)
self.transformer = Transformer(dim, depth, 4, 8, mlp_dim, dropout)
self.conv3 = conv_1x1_bn(dim, channel)
self.conv4 = conv_nxn_bn(2 * channel, channel, kernel_size)
def forward(self, x):
y = x.clone()
# Local representations
x = self.conv1(x)
x = self.conv2(x)
# Global representations
_, _, h, w = x.shape
x = rearrange(x, 'b d (h ph) (w pw) -> b (ph pw) (h w) d', ph=self.ph, pw=self.pw)
x = self.transformer(x)
x = rearrange(x, 'b (ph pw) (h w) d -> b d (h ph) (w pw)', h=h // self.ph, w=w // self.pw, ph=self.ph,
pw=self.pw)
# Fusion
x = self.conv3(x)
x = torch.cat((x, y), 1)
x = self.conv4(x)
return x
class MobileViT(nn.Module):
def __init__(self, image_size, dims, channels, num_classes, expansion=4, kernel_size=3, patch_size=(2, 2)):
super().__init__()
ih, iw = image_size
ph, pw = patch_size
assert ih % ph == 0 and iw % pw == 0
L = [2, 4, 3]
self.conv1 = conv_nxn_bn(3, channels[0], stride=2)
self.mv2 = nn.ModuleList([])
self.mv2.append(MV2Block(channels[0], channels[1], 1, expansion))
self.mv2.append(MV2Block(channels[1], channels[2], 2, expansion))
self.mv2.append(MV2Block(channels[2], channels[3], 1, expansion))
self.mv2.append(MV2Block(channels[2], channels[3], 1, expansion)) # Repeat
self.mv2.append(MV2Block(channels[3], channels[4], 2, expansion))
self.mv2.append(MV2Block(channels[5], channels[6], 2, expansion))
self.mv2.append(MV2Block(channels[7], channels[8], 2, expansion))
self.mvit = nn.ModuleList([])
self.mvit.append(MobileViTBlock(dims[0], L[0], channels[5], kernel_size, patch_size, int(dims[0] * 2)))
self.mvit.append(MobileViTBlock(dims[1], L[1], channels[7], kernel_size, patch_size, int(dims[1] * 4)))
self.mvit.append(MobileViTBlock(dims[2], L[2], channels[9], kernel_size, patch_size, int(dims[2] * 4)))
self.conv2 = conv_1x1_bn(channels[-2], channels[-1])
self.pool = nn.AvgPool2d(ih // 32, 1)
self.fc = nn.Linear(channels[-1], num_classes, bias=False)
def forward(self, x):
#print('x',x.shape)
x = self.conv1(x)
x = self.mv2[0](x)
x = self.mv2[1](x)
x = self.mv2[2](x)
x = self.mv2[3](x) # Repeat
x = self.mv2[4](x)
x = self.mvit[0](x)
x = self.mv2[5](x)
x = self.mvit[1](x)
x = self.mv2[6](x)
x = self.mvit[2](x)
x = self.conv2(x)
#print('pool_before',x.shape)
x = self.pool(x).view(-1, x.shape[1])
#print('self_pool',self.pool)
#print('pool_after',x.shape)
x = self.fc(x)
return x
def mobilevit_xxs():
dims = [64, 80, 96]
channels = [16, 16, 24, 24, 48, 48, 64, 64, 80, 80, 320]
return MobileViT((256, 256), dims, channels, num_classes=1000, expansion=2)
def mobilevit_xs():
dims = [96, 120, 144]
channels = [16, 32, 48, 48, 64, 64, 80, 80, 96, 96, 384]
return MobileViT((256, 256), dims, channels, num_classes=1000)
def mobilevit_s():
dims = [144, 192, 240]
channels = [16, 32, 64, 64, 96, 96, 128, 128, 160, 160, 640]
return MobileViT((conf.img_size, conf.img_size), dims, channels, num_classes=conf.embedding_size)
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
if __name__ == '__main__':
img = torch.randn(5, 3, 256, 256)
vit = mobilevit_xxs()
out = vit(img)
print(out.shape)
print(count_parameters(vit))
vit = mobilevit_xs()
out = vit(img)
print(out.shape)
print(count_parameters(vit))
vit = mobilevit_s()
out = vit(img)
print(out.shape)
print(count_parameters(vit))

View File

@ -0,0 +1,134 @@
from .CBAM import CBAM
import torch
import torch.nn as nn
from .Tool import GeM as gem
class Bottleneck(nn.Module):
expansion = 4
    def __init__(self, inchannel, outchannel, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
super().__init__()
self.conv1 = nn.Conv2d(in_channels=inchannel,out_channels=outchannel, kernel_size=1, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(outchannel)
self.conv2 = nn.Conv2d(in_channels=outchannel, out_channels=outchannel,kernel_size=3,bias=False, stride=stride,padding=1)
self.bn2 = nn.BatchNorm2d(outchannel)
self.conv3 =nn.Conv2d(in_channels=outchannel, out_channels=outchannel*self.expansion,stride=1,bias=False,kernel_size=1)
self.bn3 = nn.BatchNorm2d(outchannel*self.expansion)
self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
def forward(self, x):
self.identity = x
# print('>>>>>>>>',type(x))
if self.downsample is not None:
# print('>>>>downsample>>>>', type(self.downsample))
self.identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
# print('>>>>out>>>identity',out.size(),self.identity.size())
out = out+self.identity
out = self.relu(out)
return out
class resnet(nn.Module):
def __init__(self,block=Bottleneck, block_num=[3,4,6,3], num_class=1000):
super().__init__()
self.in_channel = 64
self.conv1 = nn.Conv2d(in_channels=3,
out_channels=self.in_channel,
stride=2,
kernel_size=7,
padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(self.in_channel)
self.relu = nn.ReLU(inplace=True)
self.cbam = CBAM(self.in_channel)
self.cbam1 = CBAM(2048)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, block_num[0],stride=1)
self.layer2 = self._make_layer(block, 128, block_num[1],stride=2)
self.layer3 = self._make_layer(block, 256, block_num[2],stride=2)
self.layer4 = self._make_layer(block, 512, block_num[3],stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.gem = gem()
self.fc = nn.Linear(512*block.expansion, num_class)
for m in self.modules():
if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
if isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 1.0)
def _make_layer(self,block ,channel, block_num, stride=1):
downsample = None
if stride !=1 or self.in_channel != channel*block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.in_channel, channel*block.expansion,kernel_size=1,stride=stride,bias=False),
nn.BatchNorm2d(channel*block.expansion))
layer = []
layer.append(block(self.in_channel, channel, stride, downsample))
self.in_channel = channel*block.expansion
for _ in range(1, block_num):
layer.append(block(self.in_channel, channel))
return nn.Sequential(*layer)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.cbam(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.cbam1(x)
# x = self.avgpool(x)
x = self.gem(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
class TripletNet(nn.Module):
def __init__(self, num_class, flag=True):
super(TripletNet, self).__init__()
self.initnet = rescbam(num_class)
self.flag = flag
def forward(self, x1, x2=None, x3=None):
if self.flag:
output1 = self.initnet(x1)
output2 = self.initnet(x2)
output3 = self.initnet(x3)
return output1, output2, output3
else:
output = self.initnet(x1)
return output
def rescbam(num_class):
return resnet(block=Bottleneck, block_num=[3,4,6,3],num_class=num_class)
if __name__ =='__main__':
input1 = torch.randn(4,3,640,640)
input2 = torch.randn(4,3,640,640)
input3 = torch.randn(4,3,640,640)
    # rescbam test
# Resnet50 = rescbam(512)
# output = Resnet50.forward(input1)
# print(Resnet50)
    # TripletNet test
trnet = TripletNet(512)
output = trnet(input1, input2, input3)
print(output)

View File

@ -0,0 +1,182 @@
"""resnet in pytorch
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385v1
"""
import torch
import torch.nn as nn
from config import config as conf
class BasicBlock(nn.Module):
"""Basic Block for resnet 18 and resnet 34
"""
#BasicBlock and BottleNeck block
#have different output size
#we use class attribute expansion
#to distinct
expansion = 1
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
#residual function
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
#shortcut
self.shortcut = nn.Sequential()
#the shortcut output dimension is not the same with residual function
#use 1*1 convolution to match the dimension
if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class BottleNeck(nn.Module):
"""Residual block for resnet over 50 layers
"""
expansion = 4
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, stride=stride, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion),
)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BottleNeck.expansion, stride=stride, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class ResNet(nn.Module):
def __init__(self, block, num_block, num_classes=conf.embedding_size):
super().__init__()
self.in_channels = 64
# self.conv1 = nn.Sequential(
# nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
# nn.BatchNorm2d(64),
# nn.ReLU(inplace=True))
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64,stride=2,kernel_size=7,padding=3,bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
#we use a different inputsize than the original paper
#so conv2_x's stride is 1
self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out',
                                        nonlinearity='relu')
if isinstance(m, (nn.BatchNorm2d)):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 1.0)
def _make_layer(self, block, out_channels, num_blocks, stride):
"""make resnet layers(by layer i didnt mean this 'layer' was the
same as a neuron netowork layer, ex. conv layer), one layer may
contain more than one residual block
Args:
block: block type, basic block or bottle neck block
out_channels: output depth channel number of this layer
num_blocks: how many blocks per layer
stride: the stride of the first block of this layer
Return:
return a resnet layer
"""
        # we have num_blocks blocks per layer; the stride of the first block
        # may be 1 or 2, while the stride of the remaining blocks is always 1
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
output = self.conv1(x)
output = self.conv2_x(output)
output = self.conv3_x(output)
output = self.conv4_x(output)
output = self.conv5_x(output)
        print('poolBefore', output.shape)
output = self.avg_pool(output)
print('poolAfter',output.shape)
output = output.view(output.size(0), -1)
print('fcBefore',output.shape)
output = self.fc(output)
return output
def resnet18():
""" return a ResNet 18 object
"""
return ResNet(BasicBlock, [2, 2, 2, 2])
def resnet34():
""" return a ResNet 34 object
"""
return ResNet(BasicBlock, [3, 4, 6, 3])
def resnet50():
""" return a ResNet 50 object
"""
return ResNet(BottleNeck, [3, 4, 6, 3])
def resnet101():
""" return a ResNet 101 object
"""
return ResNet(BottleNeck, [3, 4, 23, 3])
def resnet152():
""" return a ResNet 152 object
"""
return ResNet(BottleNeck, [3, 8, 36, 3])
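
A quick forward-pass check for this variant (input size is illustrative; note that forward() prints its debug shapes, and the module assumes the local config import resolves):

import torch
net = resnet18()
emb = net(torch.randn(2, 3, 224, 224))
print(emb.shape)   # (2, conf.embedding_size)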

View File

@ -0,0 +1,120 @@
""" Resnet_IR_SE in ArcFace """
import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
def forward(self, x):
return x.reshape(x.shape[0], -1)
class SEConv(nn.Module):
"""Use Convolution instead of FullyConnection in SE"""
def __init__(self, channels, reduction):
super().__init__()
self.net = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channels, channels // reduction, kernel_size=1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(channels // reduction, channels, kernel_size=1, bias=False),
nn.Sigmoid(),
)
def forward(self, x):
return self.net(x) * x
class SE(nn.Module):
def __init__(self, channels, reduction):
super().__init__()
self.net = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Linear(channels, channels // reduction),
nn.ReLU(inplace=True),
nn.Linear(channels // reduction, channels),
nn.Sigmoid(),
)
def forward(self, x):
return self.net(x) * x
class IRSE(nn.Module):
def __init__(self, channels, depth, stride):
super().__init__()
if channels == depth:
self.shortcut = nn.MaxPool2d(kernel_size=1, stride=stride)
else:
self.shortcut = nn.Sequential(
nn.Conv2d(channels, depth, (1, 1), stride, bias=False),
nn.BatchNorm2d(depth),
)
self.residual = nn.Sequential(
nn.BatchNorm2d(channels),
nn.Conv2d(channels, depth, (3, 3), 1, 1, bias=False),
nn.PReLU(depth),
nn.Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
nn.BatchNorm2d(depth),
SEConv(depth, 16),
)
def forward(self, x):
return self.shortcut(x) + self.residual(x)
class ResIRSE(nn.Module):
"""Resnet50-IRSE backbone"""
def __init__(self, ih,embedding_size, drop_ratio):
super().__init__()
ih_last = ih // 16
self.input_layer = nn.Sequential(
nn.Conv2d(3, 64, (3, 3), 1, 1, bias=False),
nn.BatchNorm2d(64),
nn.PReLU(64),
)
self.output_layer = nn.Sequential(
nn.BatchNorm2d(512),
nn.Dropout(drop_ratio),
Flatten(),
nn.Linear(512 * ih_last * ih_last, embedding_size),
nn.BatchNorm1d(embedding_size),
)
# ["channels", "depth", "stride"],
self.res50_arch = [
[64, 64, 2], [64, 64, 1], [64, 64, 1],
[64, 128, 2], [128, 128, 1], [128, 128, 1], [128, 128, 1],
[128, 256, 2], [256, 256, 1], [256, 256, 1], [256, 256, 1], [256, 256, 1],
[256, 256, 1], [256, 256, 1], [256, 256, 1], [256, 256, 1], [256, 256, 1],
[256, 256, 1], [256, 256, 1], [256, 256, 1], [256, 256, 1],
[256, 512, 2], [512, 512, 1], [512, 512, 1],
]
self.body = nn.Sequential(*[ IRSE(a,b,c) for (a,b,c) in self.res50_arch ])
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return x
if __name__ == "__main__":
from PIL import Image
import numpy as np
x = Image.open("../samples/009.jpg").convert('L')
x = x.resize((128, 128))
x = np.asarray(x, dtype=np.float32)
x = x[None, None, ...]
x = torch.from_numpy(x)
net = ResIRSE(512, 0.6)
net.eval()
with torch.no_grad():
out = net(x)
print(out.shape)

View File

@ -0,0 +1,384 @@
import torch
import torch.nn as nn
# from config import config as conf
from ..config import config as conf
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
#from .utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=conf.embedding_size, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
#print('poolBefore', x.shape)
x = self.avgpool(x)
#print('poolAfter', x.shape)
x = torch.flatten(x, 1)
#print('fcBefore',x.shape)
x = self.fc(x)
# print('fcAfter',x.shape)
return x
def forward(self, x):
return self._forward_impl(x)
# def _resnet(arch, block, layers, pretrained, progress, **kwargs):
# model = ResNet(block, layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict, strict=False)
# return model
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
#print('state_dict',state_dict)
src_state_dict = state_dict
target_state_dict = model.state_dict()
skip_keys = []
# skip mismatch size tensors in case of pretraining
for k in src_state_dict.keys():
if k not in target_state_dict:
continue
if src_state_dict[k].size() != target_state_dict[k].size():
skip_keys.append(k)
for k in skip_keys:
del src_state_dict[k]
missing_keys, unexpected_keys = model.load_state_dict(src_state_dict, strict=False)
return model
def resnet18(pretrained=True, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
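
# --- Usage sketch (illustrative addition, not part of the original file) ---
# Minimal forward pass through one of the factory functions above. pretrained=False skips
# the weight download in _resnet; the printed shape assumes the default torchvision-style
# 1000-class head is kept in this copy of the model definition.
if __name__ == "__main__":
    import torch

    model = resnet18(pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))  # NCHW input expected by the stem conv
    print(logits.shape)  # e.g. torch.Size([1, 1000]) with the default num_classes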

View File

@ -0,0 +1,4 @@
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url

View File

@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 18 17:21:01 2024
@author: ym
"""
import numpy as np
import torch
import cv2
import torch.nn as nn
import torchvision.transforms as T
from .model import mobilevit_s, resnet18, resnet34, resnet50, mobilenet_v2, MobileNetV3_Small
from .config import config as conf
class ReIDInterface:
def __init__(self, config):
self.device = conf.device
if conf.backbone == 'resnet18':
# model = ResIRSE(img_size, embedding_size, conf.drop_ratio).to(device)
model = resnet18().to(self.device)
elif conf.backbone == 'resnet34':
model = resnet34().to(self.device)
elif conf.backbone == 'resnet50':
model = resnet50().to(self.device)
elif conf.backbone == 'mobilevit_s':
model = mobilevit_s().to(self.device)
elif conf.backbone == 'mobilenetv3':
model = MobileNetV3_Small().to(self.device)
else:
model = mobilenet_v2().to(self.device)
self.batch_size = conf.batch_size
self.embedding_size = conf.embedding_size
self.img_size = conf.img_size
self.model_path = conf.model_path
        # the original input was a PIL image
self.transform = T.Compose([
T.ToTensor(),
T.Resize((self.img_size, self.img_size)),
T.ConvertImageDtype(torch.float32),
T.Normalize(mean=[0.5], std=[0.5]),
])
self.model = nn.DataParallel(model).to(self.device)
self.model.load_state_dict(torch.load(self.model_path, map_location=self.device))
self.model.eval()
def inference(self, images, detections):
if isinstance(images, np.ndarray):
features = self.inference_image(images, detections)
return features
batch_patches = []
patches = []
for i, img in enumerate(images):
img = img.copy()
patch = self.transform(img)
if str(self.device) != "cpu":
patch = patch.to(device=self.device).half()
else:
patch = patch.to(device=self.device)
patches.append(patch)
if (i + 1) % self.batch_size == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
features = np.zeros((0, self.embedding_size))
for patches in batch_patches:
pred=self.model(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
return features
def inference_image(self, image, detections):
H, W, _ = np.shape(image)
batch_patches = []
patches = []
for d in range(np.size(detections, 0)):
tlbr = detections[d, :4].astype(np.int_)
tlbr[0] = max(0, tlbr[0])
tlbr[1] = max(0, tlbr[1])
tlbr[2] = min(W - 1, tlbr[2])
tlbr[3] = min(H - 1, tlbr[3])
img = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]
img = img[:, :, ::-1].copy() # the model expects RGB inputs
patch = self.transform(img)
# patch = patch.to(device=self.device).half()
if str(self.device) != "cpu":
patch = patch.to(device=self.device).half()
else:
patch = patch.to(device=self.device)
patches.append(patch)
if (d + 1) % self.batch_size == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
features = np.zeros((0, self.embedding_size))
for patches in batch_patches:
pred = self.model(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
return features
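
# --- Usage sketch (illustrative; import path is hypothetical and a valid checkpoint must exist at conf.model_path) ---
#   from feat_extract.inference import ReIDInterface   # hypothetical module path
#   from feat_extract.config import config as conf
#
#   reid = ReIDInterface(conf)
#   # Single BGR frame + Nx4 tlbr detections -> routed to inference_image():
#   feats = reid.inference(frame_bgr, dets)
#   # feats is an (N, conf.embedding_size) float array, one row per detection crop.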

View File

@ -0,0 +1,21 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 19 16:10:39 2024
@author: ym
"""
import torch
from model.resnet_pre import resnet18
def main():
model_path = "best.pth"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = resnet18().to(device)
model.load_state_dict(torch.load(model_path, map_location=device))
if __name__ == "__main__":
main()

View File

@ -0,0 +1,66 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
from functools import partial
import torch
from ultralytics.utils import IterableSimpleNamespace, yaml_load
from ultralytics.utils.checks import check_yaml
from .bot_sort import BOTSORT
from .byte_tracker import BYTETracker
TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT}
def on_predict_start(predictor, persist=False):
"""
Initialize trackers for object tracking during prediction.
Args:
predictor (object): The predictor object to initialize trackers for.
persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
Raises:
AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
"""
if hasattr(predictor, 'trackers') and persist:
return
tracker = check_yaml(predictor.args.tracker)
cfg = IterableSimpleNamespace(**yaml_load(tracker))
assert cfg.tracker_type in ['bytetrack', 'botsort'], \
f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'"
trackers = []
for _ in range(predictor.dataset.bs):
tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
trackers.append(tracker)
predictor.trackers = trackers
def on_predict_postprocess_end(predictor):
"""Postprocess detected boxes and update with object tracking."""
bs = predictor.dataset.bs
im0s = predictor.batch[1]
for i in range(bs):
det = predictor.results[i].boxes.cpu().numpy()
if len(det) == 0:
continue
tracks = predictor.trackers[i].update(det, im0s[i])
if len(tracks) == 0:
continue
idx = tracks[:, -1].astype(int)
predictor.results[i] = predictor.results[i][idx]
predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1]))
def register_tracker(model, persist):
"""
Register tracking callbacks to the model for object tracking during prediction.
Args:
model (object): The model object to register tracking callbacks for.
persist (bool): Whether to persist the trackers if they already exist.
"""
model.add_callback('on_predict_start', partial(on_predict_start, persist=persist))
model.add_callback('on_predict_postprocess_end', on_predict_postprocess_end)
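
# --- Usage sketch (illustrative; assumes the standard ultralytics YOLO predictor API) ---
#   from ultralytics import YOLO
#
#   model = YOLO("yolov8n.pt")              # hypothetical weights file
#   register_tracker(model, persist=False)  # wires the two callbacks above onto the predictor
#   for result in model.predict(source="video.mp4", stream=True):
#       print(result.boxes)                 # boxes now carry the ids assigned by BYTETracker/BOTSORT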

View File

@ -0,0 +1,3 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license

Binary file not shown.

View File

@ -0,0 +1,279 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import copy
import cv2
import numpy as np
from ultralytics.utils import LOGGER
class GMC:
def __init__(self, method='sparseOptFlow', downscale=2):
"""Initialize a video tracker with specified parameters."""
super().__init__()
self.method = method
self.downscale = max(1, int(downscale))
if self.method == 'orb':
self.detector = cv2.FastFeatureDetector_create(20)
self.extractor = cv2.ORB_create()
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
elif self.method == 'sift':
self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)
self.matcher = cv2.BFMatcher(cv2.NORM_L2)
elif self.method == 'ecc':
number_of_iterations = 5000
termination_eps = 1e-6
self.warp_mode = cv2.MOTION_EUCLIDEAN
self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)
elif self.method == 'sparseOptFlow':
self.feature_params = dict(maxCorners=1000,
qualityLevel=0.01,
minDistance=1,
blockSize=3,
useHarrisDetector=False,
k=0.04)
elif self.method in ['none', 'None', None]:
self.method = None
else:
raise ValueError(f'Error: Unknown GMC method:{method}')
self.prevFrame = None
self.prevKeyPoints = None
self.prevDescriptors = None
self.initializedFirstFrame = False
def apply(self, raw_frame, detections=None):
"""Apply object detection on a raw frame using specified method."""
if self.method in ['orb', 'sift']:
return self.applyFeatures(raw_frame, detections)
elif self.method == 'ecc':
return self.applyEcc(raw_frame, detections)
elif self.method == 'sparseOptFlow':
return self.applySparseOptFlow(raw_frame, detections)
else:
return np.eye(2, 3)
def applyEcc(self, raw_frame, detections=None):
"""Initialize."""
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3, dtype=np.float32)
# Downscale image (TODO: consider using pyramids)
if self.downscale > 1.0:
frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
width = width // self.downscale
height = height // self.downscale
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
# Initialization done
self.initializedFirstFrame = True
return H
# Run the ECC algorithm. The results are stored in warp_matrix.
# (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)
try:
(cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)
except Exception as e:
LOGGER.warning(f'WARNING: find transform failed. Set warp as identity {e}')
return H
def applyFeatures(self, raw_frame, detections=None):
"""Initialize."""
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3)
# Downscale image (TODO: consider using pyramids)
if self.downscale > 1.0:
# frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
width = width // self.downscale
height = height // self.downscale
# Find the keypoints
mask = np.zeros_like(frame)
# mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255
mask[int(0.02 * height):int(0.98 * height), int(0.02 * width):int(0.98 * width)] = 255
if detections is not None:
for det in detections:
tlbr = (det[:4] / self.downscale).astype(np.int_)
mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0
keypoints = self.detector.detect(frame, mask)
# Compute the descriptors
keypoints, descriptors = self.extractor.compute(frame, keypoints)
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
# Initialization done
self.initializedFirstFrame = True
return H
# Match descriptors.
knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)
# Filtered matches based on smallest spatial distance
matches = []
spatialDistances = []
maxSpatialDistance = 0.25 * np.array([width, height])
# Handle empty matches case
if len(knnMatches) == 0:
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
return H
for m, n in knnMatches:
if m.distance < 0.9 * n.distance:
prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt
currKeyPointLocation = keypoints[m.trainIdx].pt
spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],
prevKeyPointLocation[1] - currKeyPointLocation[1])
if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \
(np.abs(spatialDistance[1]) < maxSpatialDistance[1]):
spatialDistances.append(spatialDistance)
matches.append(m)
meanSpatialDistances = np.mean(spatialDistances, 0)
stdSpatialDistances = np.std(spatialDistances, 0)
inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances
goodMatches = []
prevPoints = []
currPoints = []
for i in range(len(matches)):
if inliers[i, 0] and inliers[i, 1]:
goodMatches.append(matches[i])
prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
currPoints.append(keypoints[matches[i].trainIdx].pt)
prevPoints = np.array(prevPoints)
currPoints = np.array(currPoints)
# Draw the keypoint matches on the output image
# if False:
# import matplotlib.pyplot as plt
# matches_img = np.hstack((self.prevFrame, frame))
# matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)
# W = np.size(self.prevFrame, 1)
# for m in goodMatches:
# prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)
# curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_)
# curr_pt[0] += W
# color = np.random.randint(0, 255, 3)
# color = (int(color[0]), int(color[1]), int(color[2]))
#
# matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)
# matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)
# matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)
#
# plt.figure()
# plt.imshow(matches_img)
# plt.show()
# Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
# Handle downscale
if self.downscale > 1.0:
H[0, 2] *= self.downscale
H[1, 2] *= self.downscale
else:
LOGGER.warning('WARNING: not enough matching points')
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
self.prevDescriptors = copy.copy(descriptors)
return H
def applySparseOptFlow(self, raw_frame, detections=None):
"""Initialize."""
height, width, _ = raw_frame.shape
frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
H = np.eye(2, 3)
# Downscale image
if self.downscale > 1.0:
# frame = cv2.GaussianBlur(frame, (3, 3), 1.5)
frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))
# Find the keypoints
keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)
# Handle first frame
if not self.initializedFirstFrame:
# Initialize data
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
# Initialization done
self.initializedFirstFrame = True
return H
# Find correspondences
matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)
# Leave good correspondences only
prevPoints = []
currPoints = []
for i in range(len(status)):
if status[i]:
prevPoints.append(self.prevKeyPoints[i])
currPoints.append(matchedKeypoints[i])
prevPoints = np.array(prevPoints)
currPoints = np.array(currPoints)
# Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(currPoints, 0)):
H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
# Handle downscale
if self.downscale > 1.0:
H[0, 2] *= self.downscale
H[1, 2] *= self.downscale
else:
LOGGER.warning('WARNING: not enough matching points')
# Store to next iteration
self.prevFrame = frame.copy()
self.prevKeyPoints = copy.copy(keypoints)
return H
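
# --- Minimal self-check (illustrative addition, not part of the original module) ---
# Two synthetic frames with high-contrast rectangles, the second shifted by a few pixels,
# should produce a 2x3 affine matrix whose translation column is close to the shift.
if __name__ == "__main__":
    frame1 = np.zeros((480, 640, 3), dtype=np.uint8)
    for (x, y) in [(100, 100), (300, 200), (500, 350)]:
        cv2.rectangle(frame1, (x, y), (x + 40, y + 40), (255, 255, 255), -1)
    shift = np.float32([[1, 0, 5], [0, 1, 3]])         # translate by (5, 3) pixels
    frame2 = cv2.warpAffine(frame1, shift, (640, 480))

    gmc = GMC(method='sparseOptFlow', downscale=2)
    gmc.apply(frame1)                                   # first call only initializes
    H = gmc.apply(frame2)                               # estimated inter-frame motion
    print(H)                                            # translation column should be near (5, 3)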

View File

@ -0,0 +1,368 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import numpy as np
import scipy.linalg
class KalmanFilterXYAH:
"""
For bytetrack. A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space (x, y, a, h, vx, vy, va, vh) contains the bounding box center position (x, y),
aspect ratio a, height h, and their respective velocities.
Object motion follows a constant velocity model. The bounding box location (x, y, a, h) is taken as direct
observation of the state space (linear observation model).
"""
def __init__(self):
"""Initialize Kalman filter model matrices with motion and observation uncertainty weights."""
ndim, dt = 4, 1.
# Create Kalman filter model matrices.
self._motion_mat = np.eye(2 * ndim, 2 * ndim)
for i in range(ndim):
self._motion_mat[i, ndim + i] = dt
self._update_mat = np.eye(ndim, 2 * ndim)
# Motion and observation uncertainty are chosen relative to the current state estimate. These weights control
# the amount of uncertainty in the model. This is a bit hacky.
self._std_weight_position = 1. / 20
self._std_weight_velocity = 1. / 160
def initiate(self, measurement):
"""
Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, a, h) with center position (x, y),
aspect ratio a, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8
dimensional) of the new track. Unobserved velocities are initialized
to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2,
2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""
Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are
initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2,
self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5,
self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
# mean = np.dot(self._motion_mat, mean)
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""
Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state estimate.
"""
std = [
self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1,
self._std_weight_position * mean[3]]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def multi_predict(self, mean, covariance):
"""
Run Kalman filter prediction step (Vectorized version).
Parameters
----------
mean : ndarray
The Nx8 dimensional mean matrix of the object states at the previous time step.
covariance : ndarray
The Nx8x8 dimensional covariance matrix of the object states at the previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are
initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3],
1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]]
std_vel = [
self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3],
1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]]
sqr = np.square(np.r_[std_pos, std_vel]).T
motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
motion_cov = np.asarray(motion_cov)
mean = np.dot(mean, self._motion_mat.T)
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
covariance = np.dot(left, self._motion_mat.T) + motion_cov
return mean, covariance
def update(self, mean, covariance, measurement):
"""
Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center position, a the aspect
ratio, and h the height of the bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
projected_mean, projected_cov = self.project(mean, covariance)
chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False)
kalman_gain = scipy.linalg.cho_solve((chol_factor, lower),
np.dot(covariance, self._update_mat.T).T,
check_finite=False).T
innovation = measurement - projected_mean
new_mean = mean + np.dot(innovation, kalman_gain.T)
new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T))
return new_mean, new_covariance
def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'):
"""
Compute gating distance between state distribution and measurements. A suitable distance threshold can be
obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of
freedom, otherwise 2.
Parameters
----------
mean : ndarray
Mean vector over the state distribution (8 dimensional).
covariance : ndarray
Covariance of the state distribution (8x8 dimensional).
measurements : ndarray
An Nx4 dimensional matrix of N measurements, each in format (x, y, a, h) where (x, y) is the bounding box
center position, a the aspect ratio, and h the height.
only_position : Optional[bool]
If True, distance computation is done with respect to the bounding box center position only.
Returns
-------
ndarray
Returns an array of length N, where the i-th element contains the squared Mahalanobis distance between
(mean, covariance) and `measurements[i]`.
"""
mean, covariance = self.project(mean, covariance)
if only_position:
mean, covariance = mean[:2], covariance[:2, :2]
measurements = measurements[:, :2]
d = measurements - mean
if metric == 'gaussian':
return np.sum(d * d, axis=1)
elif metric == 'maha':
cholesky_factor = np.linalg.cholesky(covariance)
z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True)
return np.sum(z * z, axis=0) # square maha
else:
raise ValueError('invalid distance metric')
class KalmanFilterXYWH(KalmanFilterXYAH):
"""
For BoT-SORT. A simple Kalman filter for tracking bounding boxes in image space.
The 8-dimensional state space (x, y, w, h, vx, vy, vw, vh) contains the bounding box center position (x, y),
width w, height h, and their respective velocities.
Object motion follows a constant velocity model. The bounding box location (x, y, w, h) is taken as direct
observation of the state space (linear observation model).
"""
def initiate(self, measurement):
"""
Create track from unassociated measurement.
Parameters
----------
measurement : ndarray
Bounding box coordinates (x, y, w, h) with center position (x, y), width w, and height h.
Returns
-------
(ndarray, ndarray)
Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track.
Unobserved velocities are initialized to 0 mean.
"""
mean_pos = measurement
mean_vel = np.zeros_like(mean_pos)
mean = np.r_[mean_pos, mean_vel]
std = [
2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3],
2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3],
10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3],
10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]]
covariance = np.diag(np.square(std))
return mean, covariance
def predict(self, mean, covariance):
"""
Run Kalman filter prediction step.
Parameters
----------
mean : ndarray
The 8 dimensional mean vector of the object state at the previous time step.
covariance : ndarray
The 8x8 dimensional covariance matrix of the object state at the previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are
initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[2], self._std_weight_position * mean[3],
self._std_weight_position * mean[2], self._std_weight_position * mean[3]]
std_vel = [
self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3],
self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]]
motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))
mean = np.dot(mean, self._motion_mat.T)
covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov
return mean, covariance
def project(self, mean, covariance):
"""
Project state distribution to measurement space.
Parameters
----------
mean : ndarray
The state's mean vector (8 dimensional array).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
Returns
-------
(ndarray, ndarray)
Returns the projected mean and covariance matrix of the given state estimate.
"""
std = [
self._std_weight_position * mean[2], self._std_weight_position * mean[3],
self._std_weight_position * mean[2], self._std_weight_position * mean[3]]
innovation_cov = np.diag(np.square(std))
mean = np.dot(self._update_mat, mean)
covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))
return mean, covariance + innovation_cov
def multi_predict(self, mean, covariance):
"""
Run Kalman filter prediction step (Vectorized version).
Parameters
----------
mean : ndarray
The Nx8 dimensional mean matrix of the object states at the previous time step.
covariance : ndarray
The Nx8x8 dimensional covariance matrix of the object states at the previous time step.
Returns
-------
(ndarray, ndarray)
Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are
initialized to 0 mean.
"""
std_pos = [
self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3],
self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]]
std_vel = [
self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3],
self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]]
sqr = np.square(np.r_[std_pos, std_vel]).T
motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]
motion_cov = np.asarray(motion_cov)
mean = np.dot(mean, self._motion_mat.T)
left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))
covariance = np.dot(left, self._motion_mat.T) + motion_cov
return mean, covariance
def update(self, mean, covariance, measurement):
"""
Run Kalman filter correction step.
Parameters
----------
mean : ndarray
The predicted state's mean vector (8 dimensional).
covariance : ndarray
The state's covariance matrix (8x8 dimensional).
measurement : ndarray
The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center position, w the width,
and h the height of the bounding box.
Returns
-------
(ndarray, ndarray)
Returns the measurement-corrected state distribution.
"""
return super().update(mean, covariance, measurement)
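
# --- Minimal self-check (illustrative addition, not part of the original module) ---
# One predict/update cycle of the XYAH filter; the correction step should reduce the
# total uncertainty of the state estimate.
if __name__ == "__main__":
    kf = KalmanFilterXYAH()
    z0 = np.array([320.0, 240.0, 0.5, 100.0])      # (x, y, aspect ratio, height)
    mean, cov = kf.initiate(z0)
    mean, cov = kf.predict(mean, cov)
    z1 = np.array([322.0, 243.0, 0.5, 101.0])      # next observation
    new_mean, new_cov = kf.update(mean, cov, z1)
    print(new_mean[:4])                             # corrected (x, y, a, h)
    print(np.trace(new_cov) < np.trace(cov))        # True: the update shrinks the covariance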

View File

@ -0,0 +1,215 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import numpy as np
import math
import torch
import scipy
from scipy.spatial.distance import cdist
# from ultralytics.utils.metrics import bbox_ioa
try:
import lap # for linear_assignment
assert lap.__version__ # verify package is not directory
except (ImportError, AssertionError, AttributeError):
from ultralytics.utils.checks import check_requirements
check_requirements('lapx>=0.5.2') # update to lap package from https://github.com/rathaROG/lapx
import lap
def bbox_iou(box1, box2, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    '''Adapted from utils.metrics.metrics.bbox_iou under the project root'''
# Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4)
# Get the coordinates of bounding boxes
# x1, y1, x2, y2 = box1
# box1 = torch.tensor(box1)
# box2 = torch.tensor(box2)
b1_x1, b1_y1, b1_x2, b1_y2 = box1.T
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clip(eps)
w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clip(eps)
# Intersection area
# inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \
# (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0)
inter = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \
(np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0)
# Union Area
box1_area = w1 * h1
box2_area = w2 * h2
union = box1_area[:, None] + box2_area - inter + eps
# IoU
iou = inter / union
if CIoU or DIoU or GIoU:
cw = np.maximum(b1_x2[:, None], b2_x2) - np.minimum(b1_x1[:, None], b2_x1) # convex (smallest enclosing box) width
ch = np.maximum(b1_y2[:, None], b2_y2) - np.minimum(b1_y1[:, None], b2_y1) # convex height
if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
'''center dist ** 2'''
rho2 = ((b1_x1[:, None] + b1_x2[:, None] - b2_x1 - b2_x2) ** 2 + \
(b1_y1[:, None] + b1_y2[:, None] - b2_y1 - b2_y2) ** 2) / 4
if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
v = (4 / math.pi ** 2) * (np.arctan(w1 / h1)[:, None] - np.arctan(w2 / h2))**2
with torch.no_grad():
alpha = v / (v - iou + (1 + eps))
return iou - (rho2 / c2 + v * alpha) # CIoU
return iou - rho2 / c2 # DIoU
c_area = cw * ch + eps # convex area
return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf
return iou # IoU
def bbox_ioa(box1, box2, iou=False, eps=1e-7):
"""
Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.
Args:
box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes.
box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes.
iou (bool): Calculate the standard iou if True else return inter_area/box2_area.
eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.
Returns:
(np.array): A numpy array of shape (n, m) representing the intersection over box2 area.
"""
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1.T
b2_x1, b2_y1, b2_x2, b2_y2 = box2.T
# Intersection area
inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \
(np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0)
# box2 area
area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
if iou:
box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
area = area + box1_area[:, None] - inter_area
# Intersection over box2 area
return inter_area / (area + eps)
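
# Worked example (illustrative) for the two functions above, boxes in x1y1x2y2 format:
#   box1 = [0, 0, 10, 10]  (area 100),  box2 = [5, 5, 15, 15]  (area 100)
#   intersection = 5 * 5 = 25,  union = 100 + 100 - 25 = 175
#   bbox_iou            -> 25 / 175 ≈ 0.143
#   bbox_ioa(iou=False) -> 25 / 100 = 0.25   (intersection over box2 area)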
# def linear_assignment(cost_matrix, thresh, use_lap=True):
def linear_assignment(cost_matrix: np.ndarray, thresh: float, use_lap: bool = True) -> tuple:
"""
Perform linear assignment using scipy or lap.lapjv.
Args:
cost_matrix (np.ndarray): The matrix containing cost values for assignments.
thresh (float): Threshold for considering an assignment valid.
use_lap (bool, optional): Whether to use lap.lapjv. Defaults to True.
Returns:
(tuple): Tuple containing matched indices, unmatched indices from 'a', and unmatched indices from 'b'.
"""
if cost_matrix.size == 0:
return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))
if use_lap:
# https://github.com/gatagat/lap
_, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)
matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0]
unmatched_a = np.where(x < 0)[0]
unmatched_b = np.where(y < 0)[0]
else:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linear_sum_assignment.html
x, y = scipy.optimize.linear_sum_assignment(cost_matrix) # row x, col y
matches = np.asarray([[x[i], y[i]] for i in range(len(x)) if cost_matrix[x[i], y[i]] <= thresh])
if len(matches) == 0:
unmatched_a = list(np.arange(cost_matrix.shape[0]))
unmatched_b = list(np.arange(cost_matrix.shape[1]))
else:
unmatched_a = list(set(np.arange(cost_matrix.shape[0])) - set(matches[:, 0]))
unmatched_b = list(set(np.arange(cost_matrix.shape[1])) - set(matches[:, 1]))
return matches, unmatched_a, unmatched_b
# def iou_distance(atracks, btracks):
def iou_distance(atracks: list, btracks: list) -> np.ndarray:
"""
Compute cost based on Intersection over Union (IoU) between tracks.
Args:
atracks (list[STrack] | list[np.ndarray]): List of tracks 'a' or bounding boxes.
btracks (list[STrack] | list[np.ndarray]): List of tracks 'b' or bounding boxes.
Returns:
(np.ndarray): Cost matrix computed based on IoU.
"""
if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) \
or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):
atlbrs = atracks
btlbrs = btracks
else:
atlbrs = [track.tlbr for track in atracks]
btlbrs = [track.tlbr for track in btracks]
ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
if len(atlbrs) and len(btlbrs):
box1 = np.ascontiguousarray(atlbrs, dtype=np.float32)
box2 = np.ascontiguousarray(btlbrs, dtype=np.float32)
ious = bbox_ioa(box1, box2, iou=True)
ious_g = bbox_iou(box1, box2, GIoU=True).clip(-1.0, 1.0)
ious_d = bbox_iou(box1, box2, DIoU=True).clip(-1.0, 1.0)
ious_c = bbox_iou(box1, box2, CIoU=True).clip(-1.0, 1.0)
return 1 - ious # cost matrix
def embedding_distance(tracks, detections, metric='cosine'):
"""
Compute distance between tracks and detections based on embeddings.
Args:
tracks (list[STrack]): List of tracks.
detections (list[BaseTrack]): List of detections.
metric (str, optional): Metric for distance computation. Defaults to 'cosine'.
Returns:
(np.ndarray): Cost matrix computed based on embeddings.
"""
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
if cost_matrix.size == 0:
return cost_matrix
det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32)
# for i, track in enumerate(tracks):
# cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
return cost_matrix
def fuse_score(cost_matrix, detections):
"""
Fuses cost matrix with detection scores to produce a single similarity matrix.
Args:
cost_matrix (np.ndarray): The matrix containing cost values for assignments.
detections (list[BaseTrack]): List of detections with scores.
Returns:
(np.ndarray): Fused similarity matrix.
"""
if cost_matrix.size == 0:
return cost_matrix
iou_sim = 1 - cost_matrix
det_scores = np.array([det.score for det in detections])
det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)
fuse_sim = iou_sim * det_scores
return 1 - fuse_sim # fuse_cost
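
# --- Minimal self-check (illustrative addition, not part of the original module) ---
# Build an IoU cost matrix from raw tlbr boxes and solve the assignment.
if __name__ == "__main__":
    tracks_tlbr = [np.array([0, 0, 10, 10], dtype=np.float32),
                   np.array([50, 50, 70, 70], dtype=np.float32)]
    dets_tlbr = [np.array([1, 1, 11, 11], dtype=np.float32),
                 np.array([48, 52, 69, 71], dtype=np.float32)]
    cost = iou_distance(tracks_tlbr, dets_tlbr)         # low cost = high overlap
    matches, u_track, u_det = linear_assignment(cost, thresh=0.8)
    print(cost)
    print(matches)                                       # expected pairing: (0, 0) and (1, 1)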

View File

@ -0,0 +1,11 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 17:06:34 2023
@author: ym
"""
from .proBoxes import Boxes, boxes_add_fid
from .iterYaml import IterableSimpleNamespace, yaml_load
__all__ = "IterableSimpleNamespace", "yaml_load", "Boxes", "boxes_add_fid"

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 14:21:13 2023
@author: ym
"""
import cv2
# import sys
# sys.path.append(r"D:\DeepLearning\yolov5")
# from ultralytics.utils.plotting import Annotator, colors
from .plotting import Annotator, colors
class TrackAnnotator(Annotator):
def plotting_track(self, track, names='abc'):
"""
track[x, y, w, h, track_id, score, cls, frame_index]
boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
0 1 2 3 4 5 6 7 8
        id: tracking id, counted from 1
        frame_index: frame index, counted from 1
        cls: class index, counted from 0, used as the key into names
"""
id, cls = track[0, 4], track[0, 6]
if id >=0 and cls==0:
color = colors(int(cls), True)
elif id >=0 and cls!=0:
            color = colors(int(id), True)  # id = 0 never occurs, so this does not clash with the branch above
else:
            color = colors(19, True)  # 19 is the last entry of the palette
nb = track.shape[0]
for i in range(nb):
if i == 0:
# label = f'{int(track[i, 4])}:({int(track[i, 7])})'
label = f'ID_{int(track[i, 4])}'
elif i == nb-1:
label = ''
# label = f'{int(track[i, 4])}:({int(track[i, 7])})&{int(nb)}'
else:
label = ''
self.circle_label(track[i, :], label, color=color)
def circle_label(self, track, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
"""
        Draw the trajectory of the selected track
"""
x, y = int((track[0]+track[2])/2), int((track[1]+track[3])/2)
cv2.circle(self.im, (x, y), 6, color, 2)
# txt_color = (0,0,0)
if label:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
outside = x + w <= self.im.shape[1]-3
# p2 = x + w, y - h - 3 if outside else y + h + 3
# cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
cv2.putText(self.im,
label, (x-10 if outside else x-w+2, y-20),
0,
# self.lw / 3,
self.lw/2,
txt_color,
thickness=tf,
lineType=cv2.LINE_AA)
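
# --- Usage sketch (illustrative; mirrors how drawTrack() in utils/drawtracks.py uses this class) ---
#   import cv2, numpy as np
#   from utils.annotator import TrackAnnotator
#
#   img = np.zeros((1280, 1024, 3), dtype=np.uint8)
#   annotator = TrackAnnotator(img, line_width=2)
#   # one track: rows of [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
#   track = np.array([[100, 200, 180, 300, 3, 0.9, 5, 1, 0],
#                     [120, 260, 200, 360, 3, 0.9, 5, 2, 1]], dtype=np.float32)
#   annotator.plotting_track(track)
#   cv2.imwrite("track_vis.png", annotator.result())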

View File

@ -0,0 +1,363 @@
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 15:26:38 2024
@author: ym
"""
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from utils.annotator import TrackAnnotator
from utils.plotting import colors
def plot_frameID_y2(vts):
# boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
# 0, 1, 2, 3, 4, 5, 6, 7, 8
markers = ['o', 'v', '^', '<', '>', 's', 'p', 'P','*', '+', 'x', 'X', 'd', 'D', 'H']
colors = ['b', 'g', 'c', 'm', 'y', ]
bboxes = vts.bboxes
maxfid = max(vts.bboxes[:, 7])
CART_HIGH_THRESH1 = 430
TRACK_STATIC_THRESH = 8
fig = plt.figure(figsize=(16, 12))
gs = fig.add_gridspec(2, 1, left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.05, hspace=0.15)
# ax1, ax2 = axs
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[1,0])
ax1.plot((0, maxfid+5), (1280-CART_HIGH_THRESH1, 1280-CART_HIGH_THRESH1), 'b--', linewidth=2 )
ax2.plot((0, maxfid+5), (1280-CART_HIGH_THRESH1, 1280-CART_HIGH_THRESH1), 'b--', linewidth=2 )
hands = [t for t in vts.Hands if not t.isHandStatic]
tracks = vts.join_tracks(vts.Residual, hands)
for i, track in enumerate(vts.tracks):
boxes = track.boxes
cls, tid = track.cls, track.tid
y2, fids = boxes[:, 3], boxes[:, 7]
if cls==0:
ax1.scatter(fids, 1280-y2, marker='4', s=50, color=colors[tid%len(colors)], label = f"ID_{tid}")
else:
ax1.scatter(fids, 1280-y2, marker=markers[tid%len(markers)], color=colors[tid%len(colors)],
s=50, label = f"ID_{tid}")
# hist, bins = np.histogram(1280-y2, bins='auto')
ax1.set_ylim([-50, 1350])
for i, track in enumerate(tracks):
boxes = track.boxes
cls, tid = track.cls, track.tid
y2, fids = boxes[:, 3], boxes[:, 7]
if cls==0:
ax2.scatter(fids, 1280-y2, marker='4', s=50, color=colors[tid%len(colors)], label = f"ID_{tid}")
else:
ax2.scatter(fids, 1280-y2, marker=markers[tid%len(markers)], color=colors[tid%len(colors)],
s=50, label = f"ID_{tid}")
# hist, bins = np.histogram(1280-y2, bins='auto')
ax2.set_ylim([-50, 1350])
ax1.grid(True), ax1.set_xlim(0, maxfid+5), ax1.set_title('y2')
ax1.legend()
ax2.grid(True), ax2.set_xlim(0, maxfid+5), ax2.set_title('y2')
ax2.legend()
# plt.show()
return plt
def draw_all_trajectories(vts, edgeline, save_dir, filename):
    '''Display the four types of results'''
file, ext = os.path.splitext(filename)
# edgeline = cv2.imread("./shopcart/cart_tempt/edgeline.png")
# edgeline2 = edgeline1.copy()
    # edgeline = np.concatenate((edgeline1, edgeline2), axis=1)
# =============================================================================
    # '''1. tracks: five-key-point trajectories'''
# for track in vts.tracks:
# if track.cls != 0:
# img = edgeline.copy()
# img = draw5points(track, img)
# pth = save_dir.joinpath(f"{file}_{track.tid}.png")
# cv2.imwrite(pth, img)
# =============================================================================
    '''2. all tracks: center-point trajectories'''
img1, img2 = edgeline.copy(), edgeline.copy()
img1 = drawTrack(vts.tracks, img1)
img2 = drawTrack(vts.Residual, img2)
img = np.concatenate((img1, img2), axis = 1)
H, W = img.shape[:2]
cv2.line(img, (int(W/2), 0), (int(W/2), H), (128, 255, 128), 2)
pth = save_dir.joinpath(f"{file}_show.png")
cv2.imwrite(str(pth), img)
# =============================================================================
    # '''3. moving tracks: center-point trajectories'''
# filename2 = f"{file}_show_r.png"
# img = edgeline.copy()
# img = drawTrack(vts.Residual, img)
# pth = save_dir.joinpath(filename2)
# cv2.imwrite(pth, img)
# =============================================================================
# =============================================================================
    # '''5. tracks: time series of trajmin, trajmax, arearate, incartrate'''
# plt = drawtracefeat(vts)
# pth = save_dir.joinpath(f"{file}_x.png")
# plt.savefig(pth)
# plt.close('all')
# =============================================================================
def drawFeatures(allvts, save_dir):
# [trajlen_min, trajdist_max, trajlen_rate, trajist_rate]]
feats = [track.feature for vts in allvts for track in vts.tracks]
feats = np.array(feats)
fig, ax = plt.subplots()
ax.scatter(feats[:,3], feats[:, 1], s=10)
# ax.set_xlim(0, 2)
# ax.set_ylim(0, 100)
ax.grid(True)
plt.show()
pth = save_dir.joinpath("scatter.png")
plt.savefig(pth)
plt.close('all')
def drawtracefeat(vts):
'''
    Feature extraction and classification still need to be performed on these curves
boxes: [x1, y1, x2, y2, track_id, score, cls, frame_index, box_index]
0 1 2 3 4 5 6 7 8
'''
# matplotlib.use('Agg')
fid = vts.frameid
fid1, fid2 = min(fid), max(fid)
fig, axs = plt.subplots(2, 2,figsize=(18, 8))
kernel = [0.15, 0.7, 0.15]
for i, track in enumerate(vts.tracks):
boxes = track.boxes
tid = int(track.tid)
cls = int(track.cls)
posState = track.posState
if track.frnum>=5:
x1 = boxes[1:, 7]
y1 = track.trajmin
x11 = [i for i in range(int(min(x1)), int(max(x1)+1))]
y11 = np.interp(x11, x1, y1)
y11[1:-1] = np.convolve(y11, kernel, 'valid')
x3 = boxes[1:, 7]
y3 = track.trajmax
x33 = [i for i in range(int(min(x3)), int(max(x3)+1))]
y33 = np.interp(x33, x3, y3)
y33[1:-1] = np.convolve(y33, kernel, 'valid')
x2 = boxes[:, 7]
# y2 = track.Area/max(track.Area) - min(track.Area/max(track.Area))
y2 = track.Area/max(track.Area)
x22 = [i for i in range(int(min(x2)), int(max(x2)+1))]
y22 = np.interp(x22, x2, y2)
y22[1:-1] = np.convolve(y22, kernel, 'valid')
x4 = boxes[:, 7]
y4 = track.incartrates
x44 = [i for i in range(int(min(x4)), int(max(x4)+1))]
y44 = np.interp(x44, x4, y4)
y44[1:-1] = np.convolve(y44, kernel, 'valid')
elif track.frnum>=2:
x11 = boxes[1:, 7]
y11 = track.trajmin
x33 = boxes[1:, 7]
y33 = track.trajmax
x22 = boxes[:, 7]
# y22 = track.Area/max(track.Area) - min(track.Area/max(track.Area))
y22 = track.Area/max(track.Area)
x44 = boxes[:, 7]
y44 = track.incartrates
else:
continue
# cls!=0, max(y)>20
if cls!=0 and cls!=9 and posState>=2 and max(y11)>10 and max(y33)>10 and max(y22>0.1):
axs[0, 0].plot(x11, y11, label=f"ID_{tid}")
axs[0, 0].legend()
# axs[0].set_ylim(0, 100)
axs[0, 1].plot(x22, y22, label=f"ID_{tid}")
axs[0, 1].legend()
axs[1, 0].plot(x33, y33, label=f"ID_{tid}")
axs[1, 0].legend()
axs[1, 1].plot(x44, y44, label=f"ID_{tid}")
axs[1, 1].legend()
axs[0, 0].grid(True), axs[0, 0].set_xlim(fid1, fid2+10), axs[0, 0].set_title('trajmin')
axs[0, 1].grid(True), axs[0, 1].set_xlim(fid1, fid2+10), axs[0, 1].set_title('arearate')
axs[1, 0].grid(True), axs[1, 0].set_xlim(fid1, fid2+10), axs[1, 0].set_title('trajmax')
axs[1, 1].grid(True), axs[1, 1].set_xlim(fid1, fid2+10), axs[1, 1].set_ylim(-0.1, 1.1)
axs[1, 1].set_title('incartrate')
# pth = save_dir.joinpath(f"{file}_show_x.png")
# plt.savefig(pth)
# plt.savefig(f"./result/cls11_80212_time/{file}_show_x.png")
# plt.show()
return plt
def draw5points(track, img):
"""
    Display the trajectories of the center point and the 4 corner points, along with the trajectory features
"""
colorx = np.array([[255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255],
[0, 0, 255], [0, 255, 0], [255, 51, 255], [102, 178, 255], [51, 153, 255],[255, 153, 153],
[255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102], [51, 255, 51],
[255, 102, 255], [153, 204, 255], [255, 0, 0], [255, 255, 255]], dtype=np.uint8)
color = ((0, 0, 255), (255, 128, 0))
# img = cv2.imread("./shopcart/cart_tempt/edgeline.png")
boxes = track.boxes
cornpoints = track.cornpoints
trajlens = [int(t) for t in track.trajlens]
trajdist = [int(t) for t in track.trajdist]
if len(track.trajmin):
trajstd = np.std(track.trajmin)
else:
trajstd = 0
trajlen_min, trajlen_max, trajdist_min, trajdist_max, trajlen_rate, trajdist_rate = track.feature
for i in range(boxes.shape[0]):
cv2.circle(img, (int(cornpoints[i, 0]), int(cornpoints[i, 1])), 6, (255, 255, 255), 2)
cv2.circle(img, (int(cornpoints[i, 2]), int(cornpoints[i, 3])), 6, (255, 0, 255), 2)
cv2.circle(img, (int(cornpoints[i, 4]), int(cornpoints[i, 5])), 6, (0, 255, 0), 2)
cv2.circle(img, (int(cornpoints[i, 6]), int(cornpoints[i, 7])), 6, (64, 128, 255), 2)
cv2.circle(img, (int(cornpoints[i, 8]), int(cornpoints[i, 9])), 6, (255, 128, 64), 2)
label_0 = f"ID: {track.tid}, Class: {track.cls}"
label_1 = f"trajlens: {trajlens}, trajlen_min: {int(trajlen_min)}"
label_2 = f"trajdist: {trajdist}: trajdist_max: {int(trajdist_max)}"
label_3 = "trajlen_min/trajlen_max: {:.2f}/{:.2f} = {:.2f}".format(trajlen_min, trajlen_max, trajlen_rate)
label_4 = "trajdist_min/mwh : {:.2f}/{:.2f} = {:.2f}".format(trajdist_min, track.mwh, trajdist_rate)
label_5 = "std(trajmin) : {:.2f}".format(trajstd)
label_6 = "PCA(variance_ratio) : "
label_7 = "Rect W&H&Ratio : "
label_8 = ""
# label_8 = "IOU of incart/maxbox/minbox: {:.2f}, {:.2f}, {:.2f}".format(
# track.feature_ious[0], track.feature_ious[3], track.feature_ious[4])
    '''=============== index of the shortest trajectory ===================='''
if track.imgBorder:
idx = 0
else:
idx = trajlens.index(min(trajlens))
'''=============== PCA ===================='''
if trajlens[idx] > 12:
X = cornpoints[:, 2*idx:2*(idx+1)]
pca = PCA()
pca.fit(X)
label_6 = "PCA(variance_ratio): {:.2f}".format(pca.explained_variance_ratio_[0])
# if sum(np.isnan(pca.explained_variance_ratio_)) == 0:
for i, (comp, var) in enumerate(zip(pca.components_, pca.explained_variance_ratio_)):
pt1 = (pca.mean_ - comp*var*200).astype(np.int64)
pt2 = (pca.mean_ + comp*var*200).astype(np.int64)
cv2.line(img, pt1, pt2, color=color[i], thickness=2)
'''=============== RECT ===================='''
rect = track.trajrects[idx]
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
label_7 = "Rect W&H&Ratio: {}, {}, {:.2f}".format(int(rect[1][0]), int(rect[1][1]), min(rect[1])/(max(rect[1])+0.001))
    '''=============== draw the text labels ===================='''
# label = [label_0, label_1, label_2, label_3, label_4, label_5, label_6, label_7, label_8]
# w, h = cv2.getTextSize('abc', 0, fontScale=2, thickness=1)[0]
# for i in range(len(label)):
# cv2.putText(img, label[i], (20, int((i+1)*1.1*h)), 0, 1,
# [int(x) for x in colorx[i]], 2, lineType=cv2.LINE_AA)
# pth = save_dir.joinpath(f"{file}_{track.tid}.png")
# cv2.imwrite(pth, img)
    '''Black-and-white figures generated for the patent write-up'''
# imgbt = cv2.bitwise_not(img)
# for i in range(box.shape[0]):
# cv2.circle(imgbt, (int(cornpoints[i, 0]), int(cornpoints[i, 1])), 14, (0, 0, 0), 2)
# cv2.drawMarker(imgbt, (int(cornpoints[i, 2]), int(cornpoints[i, 3])), color= (0, 0, 0), markerType=3, markerSize = 30, thickness=2)
# cv2.drawMarker(imgbt, (int(cornpoints[i, 4]), int(cornpoints[i, 5])), color= (0, 0, 0), markerType=4, markerSize = 30, thickness=2)
# cv2.drawMarker(imgbt, (int(cornpoints[i, 6]), int(cornpoints[i, 7])), color= (0, 0, 0), markerType=5, markerSize = 30, thickness=2)
# cv2.drawMarker(imgbt, (int(cornpoints[i, 8]), int(cornpoints[i, 9])), color= (0, 0, 0), markerType=6, markerSize = 30, thickness=2)
# cv2.imwrite(pth + f"/zhuanli/{file}_{track.tid}.png", imgbt)
return img
def drawTrack(tracks, img):
# img = cv2.imread("./shopcart/cart_tempt/edgeline.png")
annotator = TrackAnnotator(img, line_width=2)
for track in tracks:
annotator.plotting_track(track.boxes)
img = annotator.result()
# pth = save_dir.joinpath(f"{filename}")
# cv2.imwrite(pth, img)
return img
if __name__ == "__main__":
y = np.array([5.0, 20, 40, 41, 42, 55, 56])

27
tracking/utils/gen.py Normal file
View File

@ -0,0 +1,27 @@
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 10:36:38 2024
@author: ym
"""
import contextlib
import time
class Profile(contextlib.ContextDecorator):
# YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager
def __init__(self, t=0.0):
self.t = t
# self.cuda = torch.cuda.is_available()
def __enter__(self):
self.start = self.time()
return self
def __exit__(self, type, value, traceback):
self.dt = self.time() - self.start # delta-time
self.t += self.dt # accumulate dt
def time(self):
# if self.cuda:
# torch.cuda.synchronize()
return time.time()
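
# --- Minimal self-check (illustrative addition, not part of the original module) ---
if __name__ == "__main__":
    dt = Profile()
    with dt:
        time.sleep(0.1)        # stand-in for detection / tracking work
    print(f"elapsed: {dt.dt:.3f}s, accumulated: {dt.t:.3f}s")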

View File

@ -0,0 +1,66 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 17:07:09 2023
@author: ym
"""
from pathlib import Path
from types import SimpleNamespace
import re
import yaml
class IterableSimpleNamespace(SimpleNamespace):
"""
Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and
enables usage with dict() and for loops.
"""
def __iter__(self):
"""Return an iterator of key-value pairs from the namespace's attributes."""
return iter(vars(self).items())
def __str__(self):
"""Return a human-readable string representation of the object."""
return '\n'.join(f'{k}={v}' for k, v in vars(self).items())
def __getattr__(self, attr):
"""Custom attribute access error message with helpful information."""
name = self.__class__.__name__
raise AttributeError(f"""
'{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics
'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace
DEFAULT_CFG_PATH with the latest version from
https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml
""")
def get(self, key, default=None):
"""Return the value of the specified key if it exists; otherwise, return the default value."""
return getattr(self, key, default)
def yaml_load(file='data.yaml', append_filename=False):
"""
Load YAML data from a file.
Args:
file (str, optional): File name. Default is 'data.yaml'.
append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False.
Returns:
(dict): YAML data and file name.
"""
assert Path(file).suffix in ('.yaml', '.yml'), f'Attempting to load non-YAML file {file} with yaml_load()'
with open(file, errors='ignore', encoding='utf-8') as f:
s = f.read() # string
# Remove special characters
if not s.isprintable():
s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s)
# Add YAML filename to dict and return
data = yaml.safe_load(s) or {} # always return a dict (yaml.safe_load() may return None for empty files)
if append_filename:
data['yaml_file'] = str(file)
return data
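
# --- Minimal self-check (illustrative addition, not part of the original module) ---
if __name__ == "__main__":
    import os
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
        f.write("tracker_type: bytetrack\ntrack_high_thresh: 0.5\n")
        tmp_path = f.name
    cfg = IterableSimpleNamespace(**yaml_load(tmp_path))
    print(cfg.tracker_type, cfg.get("track_low_thresh", 0.1))  # -> bytetrack 0.1 (default used)
    os.remove(tmp_path)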

View File

@ -0,0 +1,184 @@
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 23 11:04:48 2024
@author: ym
"""
import numpy as np
import cv2
from scipy.spatial.distance import cdist
# from trackers.utils import matching
# TracksDict
def readDict(boxes, TracksDict):
feats = []
for i in range(boxes.shape[0]):
tid, fid, bid = int(boxes[i, 4]), int(boxes[i, 7]), int(boxes[i, 8])
feat = TracksDict[f"frame_{fid}"]["feats"][bid]
img = TracksDict[f"frame_{fid}"]["imgs"][bid]
box = TracksDict[f"frame_{fid}"]["boxes"][bid]
assert (box[:4].astype(int) == boxes[i, :4].astype(int)).all(), f"Please check: frame_{fid}"
feats.append(feat)
# img = TracksDict[fid][f'{bid}_img']
# cv2.imwrite(f'./data/imgs/{tid}_{fid}_{bid}.png', img)
return np.asarray(feats, dtype=np.float32)
def track_equal_track(atrack, btrack, TracksDict):
# boxes: [x, y, w, h, track_id, score, cls, frame_index, box_index]
# 0 1 2 3 4 5 6 7 8
aboxes = atrack.boxes
bboxes = btrack.boxes
    ''' 1. Check whether the two tracks overlap in time '''
afids = aboxes[:, 7].astype(np.int_)
bfids = bboxes[:, 7].astype(np.int_)
    # intersection of the frame indices
interfid = set(afids).intersection(set(bfids))
    # alternatively, test disjointness of the frame indices directly and return True or False
# interfid = set(afids).isdisjoint(set(bfids))
if len(interfid):
return False
    ''' 2. Similarity check on the track appearance features '''
afeat = readDict(aboxes, TracksDict)
bfeat = readDict(bboxes, TracksDict)
feat = np.concatenate((afeat, bfeat), axis=0)
emb_simil = 1-np.maximum(0.0, cdist(feat, feat, 'cosine'))
emb_ = 1-cdist(np.mean(afeat, axis=0)[None, :], np.mean(bfeat, axis=0)[None, :], 'cosine')
if emb_[0, 0]<0.66:
return False
    ''' 3. Spatial IoU between the two tracks '''
alabel = np.array([0] * afids.size, dtype=np.int_)
blabel = np.array([1] * bfids.size, dtype=np.int_)
label = np.concatenate((alabel, blabel), axis=0)
fids = np.concatenate((afids, bfids), axis=0)
indices = np.argsort(fids)
idx_pair = []
for i in range(len(indices)-1):
idx1, idx2 = indices[i], indices[i+1]
if label[idx1] != label[idx2] and fids[idx2] - fids[idx1] <= 3:
if label[idx1] == 0:
a_idx = idx1
b_idx = idx2-alabel.size
else:
a_idx = idx2
b_idx = idx1-alabel.size
idx_pair.append((a_idx, b_idx))
ious = []
for a, b in idx_pair:
abox, bbox = aboxes[a, :], bboxes[b, :]
xa1, ya1 = abox[0] - abox[2]/2, abox[1] - abox[3]/2
xa2, ya2 = abox[0] + abox[2]/2, abox[1] + abox[3]/2
xb1, yb1 = bbox[0] - bbox[2]/2, bbox[1] - bbox[3]/2
xb2, yb2 = bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2
inter = (np.minimum(xb2, xa2) - np.maximum(xb1, xa1)).clip(0) * \
(np.minimum(yb2, ya2) - np.maximum(yb1, ya1)).clip(0)
# Union Area
box1_area = abox[2] * abox[3]
box2_area = bbox[2] * bbox[3]
union = box1_area + box2_area - inter + 1e-6
ious.append(inter/union)
cont = False if len(interfid) else True
# cont2 = emb_[0, 0]>0.75
# cont3 = all(iou>0.5 for iou in ious)
# cont = cont and cont2 and cont3
return cont
def track_equal_str(atrack, btrack):
if atrack == btrack:
return True
else:
return False
def merge_track(Residual):
out_list = []
alist = [t for t in Residual]
while alist:
atrack = alist[0]
cur_list = []
cur_list.append(atrack)
alist.pop(0)
blist = [b for b in alist]
alist = []
for btrack in blist:
if track_equal_str(atrack, btrack):
cur_list.append(btrack)
else:
alist.append(btrack)
out_list.append(cur_list)
return out_list
def main():
Residual = ['a', 'b', 'c', 'd', 'a', 'b', 'c', 'b', 'c', 'd']
out_list = merge_track(Residual)
print(Residual)
print(out_list)
if __name__ == "__main__":
main()
# =============================================================================
# for i, atrack in enumerate(input_list):
# cur_list = []
# cur_list.append(atrack)
# del input_list[i]
#
# for j, btrack in enumerate(input_list):
# if track_equal(atrack, btrack):
# cur_list.append(btrack)
# del input_list[j]
#
# out_list.append(cur_list)
# =============================================================================

288
tracking/utils/plotting.py Normal file
View File

@ -0,0 +1,288 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
import math
import warnings
from pathlib import Path
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from PIL import __version__ as pil_version
# from utils.general import increment_path
# from ultralytics.utils import LOGGER, TryExcept, ops, plt_settings, threaded
# from .checks import check_font, check_version, is_ascii
# from .files import increment_path
class Colors:
"""
Ultralytics default color palette https://ultralytics.com/.
This class provides methods to work with the Ultralytics color palette, including converting hex color codes to
RGB values.
Attributes:
palette (list of tuple): List of RGB color values.
n (int): The number of colors in the palette.
pose_palette (np.array): A specific color palette array with dtype np.uint8.
"""
def __init__(self):
"""Initialize colors as hex = matplotlib.colors.TABLEAU_COLORS.values()."""
hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
'2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
self.n = len(self.palette)
self.pose_palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], [230, 230, 0], [255, 153, 255],
[153, 204, 255], [255, 102, 255], [255, 51, 255], [102, 178, 255], [51, 153, 255],
[255, 153, 153], [255, 102, 102], [255, 51, 51], [153, 255, 153], [102, 255, 102],
[51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], [255, 255, 255]],
dtype=np.uint8)
def __call__(self, i, bgr=False):
"""Converts hex color codes to RGB values."""
c = self.palette[int(i) % self.n]
return (c[2], c[1], c[0]) if bgr else c
@staticmethod
def hex2rgb(h):
"""Converts hex color codes to RGB values (i.e. default PIL order)."""
return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
colors = Colors() # create instance for 'from utils.plots import colors'
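
# Example (illustrative): colors(i) cycles through the 20-entry palette; pass bgr=True for OpenCV.
#   colors(0)            -> (255, 56, 56)   RGB
#   colors(0, bgr=True)  -> (56, 56, 255)   BGR, suitable for cv2 drawing calls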
class Annotator:
"""
Ultralytics Annotator for train/val mosaics and JPGs and predictions annotations.
Attributes:
im (Image.Image or numpy array): The image to annotate.
pil (bool): Whether to use PIL or cv2 for drawing annotations.
font (ImageFont.truetype or ImageFont.load_default): Font used for text annotations.
lw (float): Line width for drawing.
skeleton (List[List[int]]): Skeleton structure for keypoints.
limb_color (List[int]): Color palette for limbs.
kpt_color (List[int]): Color palette for keypoints.
"""
def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
"""Initialize the Annotator class with image and line width along with color palette for keypoints and limbs."""
assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
self.pil = pil  # cv2 drawing is the default; the PIL branches below additionally expect fromarray() to set self.draw
self.im = im
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width
# Pose
self.skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9],
[8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
self.limb_color = colors.pose_palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]]
self.kpt_color = colors.pose_palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]]
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
"""Add one xyxy box to image with label."""
if isinstance(box, torch.Tensor):
box = box.tolist()
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
if label:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
outside = p1[1] - h >= 3
p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled
cv2.putText(self.im,
label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
0,
self.lw / 3,
txt_color,
thickness=tf,
lineType=cv2.LINE_AA)
def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
"""
Plot masks on image.
Args:
masks (tensor): Predicted masks on cuda, shape: [n, h, w]
colors (List[List[Int]]): Colors for predicted masks, [[r, g, b] * n]
im_gpu (tensor): Image is in cuda, shape: [3, h, w], range: [0, 1]
alpha (float): Mask transparency: 0.0 fully transparent, 1.0 opaque
retina_masks (bool): Whether to use high resolution masks or not. Defaults to False.
"""
if self.pil:
# Convert to numpy first
self.im = np.asarray(self.im).copy()
if len(masks) == 0:
self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
if im_gpu.device != masks.device:
im_gpu = im_gpu.to(masks.device)
colors = torch.tensor(colors, device=masks.device, dtype=torch.float32) / 255.0 # shape(n,3)
colors = colors[:, None, None] # shape(n,1,1,3)
masks = masks.unsqueeze(3) # shape(n,h,w,1)
masks_color = masks * (colors * alpha) # shape(n,h,w,3)
inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
mcs = masks_color.max(dim=0).values # shape(h,w,3)
im_gpu = im_gpu.flip(dims=[0]) # flip channel
im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
im_gpu = im_gpu * inv_alph_masks[-1] + mcs
im_mask = (im_gpu * 255)
im_mask_np = im_mask.byte().cpu().numpy()
self.im[:] = im_mask_np if retina_masks else scale_image(im_mask_np, self.im.shape)
if self.pil:
# Convert im back to PIL and update draw
self.fromarray(self.im)
def kpts(self, kpts, shape=(640, 640), radius=5, kpt_line=True):
"""
Plot keypoints on the image.
Args:
kpts (tensor): Predicted keypoints with shape [17, 3]. Each keypoint has (x, y, confidence).
shape (tuple): Image shape as a tuple (h, w), where h is the height and w is the width.
radius (int, optional): Radius of the drawn keypoints. Default is 5.
kpt_line (bool, optional): If True, the function will draw lines connecting keypoints
for human pose. Default is True.
Note: `kpt_line=True` currently only supports human pose plotting.
"""
if self.pil:
# Convert to numpy first
self.im = np.asarray(self.im).copy()
nkpt, ndim = kpts.shape
is_pose = nkpt == 17 and ndim == 3
kpt_line &= is_pose # `kpt_line=True` for now only supports human pose plotting
for i, k in enumerate(kpts):
color_k = [int(x) for x in self.kpt_color[i]] if is_pose else colors(i)
x_coord, y_coord = k[0], k[1]
if x_coord % shape[1] != 0 and y_coord % shape[0] != 0:
if len(k) == 3:
conf = k[2]
if conf < 0.5:
continue
cv2.circle(self.im, (int(x_coord), int(y_coord)), radius, color_k, -1, lineType=cv2.LINE_AA)
if kpt_line:
ndim = kpts.shape[-1]
for i, sk in enumerate(self.skeleton):
pos1 = (int(kpts[(sk[0] - 1), 0]), int(kpts[(sk[0] - 1), 1]))
pos2 = (int(kpts[(sk[1] - 1), 0]), int(kpts[(sk[1] - 1), 1]))
if ndim == 3:
conf1 = kpts[(sk[0] - 1), 2]
conf2 = kpts[(sk[1] - 1), 2]
if conf1 < 0.5 or conf2 < 0.5:
continue
if pos1[0] % shape[1] == 0 or pos1[1] % shape[0] == 0 or pos1[0] < 0 or pos1[1] < 0:
continue
if pos2[0] % shape[1] == 0 or pos2[1] % shape[0] == 0 or pos2[0] < 0 or pos2[1] < 0:
continue
cv2.line(self.im, pos1, pos2, [int(x) for x in self.limb_color[i]], thickness=2, lineType=cv2.LINE_AA)
if self.pil:
# Convert im back to PIL and update draw
self.fromarray(self.im)
def rectangle(self, xy, fill=None, outline=None, width=1):
"""Add rectangle to image (PIL-only)."""
self.draw.rectangle(xy, fill, outline, width)
def text(self, xy, text, txt_color=(255, 255, 255), anchor='top', box_style=False):
"""Adds text to an image using PIL or cv2."""
if anchor == 'bottom': # start y from font bottom
w, h = self.font.getsize(text) # text width, height
xy[1] += 1 - h
if self.pil:
if box_style:
w, h = self.font.getsize(text)
self.draw.rectangle((xy[0], xy[1], xy[0] + w + 1, xy[1] + h + 1), fill=txt_color)
# Using `txt_color` for background and draw fg with white color
txt_color = (255, 255, 255)
if '\n' in text:
lines = text.split('\n')
_, h = self.font.getsize(text)
for line in lines:
self.draw.text(xy, line, fill=txt_color, font=self.font)
xy[1] += h
else:
self.draw.text(xy, text, fill=txt_color, font=self.font)
else:
if box_style:
tf = max(self.lw - 1, 1) # font thickness
w, h = cv2.getTextSize(text, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height
outside = xy[1] - h >= 3
p2 = xy[0] + w, xy[1] - h - 3 if outside else xy[1] + h + 3
cv2.rectangle(self.im, xy, p2, txt_color, -1, cv2.LINE_AA) # filled
# Using `txt_color` for background and draw fg with white color
txt_color = (255, 255, 255)
tf = max(self.lw - 1, 1) # font thickness
cv2.putText(self.im, text, xy, 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA)
def fromarray(self, im):
"""Update self.im from a numpy array."""
self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
self.draw = ImageDraw.Draw(self.im)
def result(self):
"""Return annotated image as array."""
return np.asarray(self.im)
def scale_image(masks, im0_shape, ratio_pad=None):
"""
Takes a mask, and resizes it to the original image size
Args:
masks (np.ndarray): resized and padded masks/images, [h, w, num]/[h, w, 3].
im0_shape (tuple): the original image shape
ratio_pad (tuple): the ratio of the padding to the original image.
Returns:
masks (np.ndarray): The masks rescaled and cropped to the original image shape.
"""
# Rescale coordinates (xyxy) from im1_shape to im0_shape
im1_shape = masks.shape
if im1_shape[:2] == im0_shape[:2]:
return masks
if ratio_pad is None: # calculate from im0_shape
gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new
pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
else:
gain = ratio_pad[0][0]
pad = ratio_pad[1]
top, left = int(pad[1]), int(pad[0]) # y, x
bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
if len(masks.shape) < 2:
raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
masks = masks[top:bottom, left:right]
masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))
if len(masks.shape) == 2:
masks = masks[:, :, None]
return masks
def boxing_img(det, img, line_width=3):
annotator = Annotator(img, line_width)
for *xyxy, id, conf, cls, _, _ in reversed(det):
label = (f'id:{int(id)} '+str(int(cls)) +f' {conf:.2f}')
if cls==0:
color = colors(int(cls), True)
else:
color = colors(int(id), True)
annotator.box_label(xyxy, label, color=color)
# Return the annotated image as a numpy array
imgx = annotator.result()
return imgx
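# --- illustrative usage sketch, not part of the original module ---
# boxing_img() unpacks each detection row as (*xyxy, id, conf, cls, _, _), i.e. nine
# values per row with the two trailing fields ignored. With the cv2/numpy imports at
# the top of this file, a minimal call on made-up data could look like:
if __name__ == '__main__':
    frame = np.zeros((1280, 1024, 3), dtype=np.uint8)           # dummy image
    det = np.array([[100, 200, 300, 400, 1, 0.91, 0, 0, 0],     # one tracked detection
                    [400, 150, 550, 350, 2, 0.80, 38, 0, 0]])   # a second track, another class
    annotated = boxing_img(det, frame, line_width=2)
    cv2.imwrite('annotated_demo.png', annotated)                 # demo output path (made up)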


@ -0,0 +1,94 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 17:14:37 2023
@author: ym
"""
import numpy as np
class Boxes:
def __init__(self, boxes, orig_shape=None) -> None:
"""Initialize the Boxes class."""
if boxes.ndim == 1:
boxes = boxes[None, :]
n = boxes.shape[-1]
assert n in (6, 7, 8), f'expected `n` in (6, 7, 8), but got {n}' # xyxyb, track_id, conf, cls
self.data = boxes
self.orig_shape = orig_shape
def cpu(self):
"""Return a copy of the tensor on CPU memory."""
return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.cpu(), self.orig_shape)
def numpy(self):
"""Return a copy of the tensor as a numpy array."""
return self if isinstance(self.data, np.ndarray) else self.__class__(self.data.numpy(), self.orig_shape)
@property
def xyxy(self):
"""Return the boxes in xyxy format."""
return self.data[:, :4]
@property
def xyxyb(self):
"""Return the boxes in xyxyb format."""
return self.data[:, :5]
@property
def conf(self):
"""Return the confidence values of the boxes."""
return self.data[:, -2]
@property
def cls(self):
"""Return the class values of the boxes."""
return self.data[:, -1]
# def boxes_add_fid(tboxes):
# '''
# Append each bbox's frame index as the last column of boxes
# Return
# bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index]
# '''
# bboxes = np.empty((0, 8), dtype = np.float32)
# for tbox, f in tboxes:
# data = tbox.numpy()
# frame = f * np.ones([data.shape[0], 1])
# bbox = np.concatenate([data, frame], axis=1)
# bboxes = np.concatenate([bboxes, bbox], axis=0)
# return bboxes
def boxes_add_fid(tboxes):
'''
Append each bbox's frame index as the last column of boxes.
Return
bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index]
'''
bboxes = np.empty((0, 8), dtype = np.float32)
for data, f in tboxes:
frame = f * np.ones([data.shape[0], 1])
bbox = np.concatenate([data, frame], axis=1)
bboxes = np.concatenate([bboxes, bbox], axis=0)
return bboxes
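# --- illustrative usage sketch, not part of the original module ---
# The assert in Boxes.__init__ allows 6, 7 or 8 columns; its inline comment suggests
# the full layout xyxyb, track_id, conf, cls, with conf/cls always read from the last
# two columns. A minimal example with made-up 7-column rows:
if __name__ == '__main__':
    dets = np.array([[100., 200., 300., 400., 1., 0.91, 53.],
                     [400., 150., 550., 350., 2., 0.80, 7.]])
    boxes = Boxes(dets, orig_shape=(1280, 1024))
    print(boxes.xyxy)    # first four columns
    print(boxes.xyxyb)   # first five columns
    print(boxes.conf)    # second-to-last column
    print(boxes.cls)     # last column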


@ -0,0 +1,118 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 14:28:20 2023
@author: ym
"""
import numpy as np
from scipy.spatial.distance import cdist
def boxes_add_fid(tboxes):
'''
Append each bbox's frame index as the last column of boxes.
Return
bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index]
'''
bboxes = np.empty((0, 8), dtype = np.float32)
for tbox, f in tboxes:
data = tbox.numpy()
frame = f * np.ones([data.shape[0], 1])
bbox = np.concatenate([data, frame], axis=1)
bboxes = np.concatenate([bboxes, bbox], axis=0)
return bboxes
def array2list(bboxes):
'''
Convert bboxes into a list of per-track arrays.
bboxes: [x1, y1, x2, y2, track_id, score, cls, frame_index]
Return
lboxes: list of arrays; each array holds the boxes of one track_id in xywh format
[x, y, w, h, track_id, score, cls, frame_index]
'''
track_ids = set(bboxes[:, 4])
lboxes = []
for t_id in track_ids:
idx = np.where(bboxes[:, 4] == t_id)[0]
box = bboxes[idx, :]
x = (box[:, 0] + box[:, 2]) / 2
y = (box[:, 1] + box[:, 3]) / 2
# box: [x, y, w, h, track_id, score, cls, frame_index]
box[:, 2] = box[:, 2] - box[:, 0]
box[:, 3] = box[:, 3] - box[:, 1]
box[:, 0] = x
box[:, 1] = y
lboxes.append(box)
return lboxes
def max_dist_track(tboxes):
'''
Find the track in tboxes whose detection centers are spread over the largest distance.
Return
max_track, max_dist
'''
max_track, max_track_dist, max_dist = None, 0, 0
for track in tboxes:
box = track[:, :4].astype(int)
dist = cdist(box[:, :2], box[:, :2])
dm = np.max(dist)
if dm > max_dist:
max_dist = dm
max_track = track.copy()
max_track_dist = dist.copy()
# (ix1, ix2): indices of the two detections of this track whose centers are farthest apart
indx, indy = np.where(dist == dm)
ix1, ix2 = indx[0], indy[0]
# Ensure ix1 < ix2, so that ix1 indexes the earlier part of the video
if ix1 > ix2: ix1, ix2 = ix2, ix1
# =============================================================================
# # =============================================================================
# # Logic analysis
# # =============================================================================
# Scanzone = ((0, int(Height/4)), (int(2*Weight/3), Weight))
# if max_track.shape[0] > 10:
#
# # idx1: index of the first frame of the max_track sequence
# frame_1 = int(min(max_track[:, 7]))
# idx1 = np.where(max_track[:, 7] == frame_1)[0][0]
#
# # idx2: index of the last frame of the max_track sequence
# frame_2 = int(max(max_track[:, 7]))
# idx2 = np.where(max_track[:, 7] == frame_2)[0][0]
#
# # (x1, y1): target center in the first frame of the max_track sequence
# x1, y1 = max_track[idx1, :2]
#
# # (x2, y2): target center in the last frame of the max_track sequence
# x2, y2 = max_track[idx2, :2]
#
#
# # Distance between the first and last frames of the track; not the same quantity as max_dist
# dist_1_2 = max_track_dist[idx1, idx2]
#
# if max_dist < 3 * Height/10:
# State = Uncertain
#
# elif y1 > y2:
# State = TakeOut
#
# elif y1 < y2:
# State = PutIn
# =============================================================================
return max_track, max_dist
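# --- illustrative usage sketch, not part of the original module ---
# A typical flow with made-up numbers: start from tracker rows of
# [x1, y1, x2, y2, track_id, score, cls, frame_index], split them per track with
# array2list() (which converts to center-xywh), then find the track whose centers
# are spread over the largest distance:
if __name__ == '__main__':
    bboxes = np.array([[100, 200, 140, 260, 1, 0.9, 0, 0],
                       [110, 400, 150, 460, 1, 0.9, 0, 1],
                       [500, 300, 560, 380, 2, 0.8, 0, 0],
                       [505, 305, 565, 385, 2, 0.8, 0, 1]], dtype=np.float32)
    tracks = array2list(bboxes)
    max_track, max_dist = max_dist_track(tracks)
    print(int(max_track[0, 4]), round(float(max_dist), 1))   # expect track 1, ~200 px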

Some files were not shown because too many files have changed in this diff.