first commit
0  utils/__init__.py  Normal file
47  utils/detect.py  Normal file
@@ -0,0 +1,47 @@
from network.vanalysis_video import vanalysis
from utils.filter import filt
import cv2 as cv
from floder.config import cfg
from utils.embedding import DataProcessing as dp
from utils.opfile import writef, readf
import numpy as np
from random import choice


class opvideo:

    def __init__(self, flowmodel, dps):
        self.flowmodel = flowmodel
        self.dps = dps

    def addFeature(self, uuid, num_id, video):
        # Filter the video down to moving frames, run the flow analysis,
        # embed the result and persist the feature under (uuid, num_id).
        imglist = filt(video)
        imgs = vanalysis(self.flowmodel, imglist)
        feature = self.dps.getFeatures(imgs)
        writef(uuid, num_id, feature)

    def opFeature(self, uuid, finalnum, video):
        # Store the new clip as id `finalnum`, then load the previously
        # stored features 0..finalnum-1 and cross-compare them.
        videoFeature = []
        self.addFeature(uuid, finalnum, video)
        for num_id in range(0, finalnum):
            feature = readf(uuid, num_id)
            videoFeature.append(feature)
        redic = self.opVideFeature(videoFeature)
        #print(redic)
        return redic

    def opVideFeature(self, videoFeature):
        # For each feature, mark True as soon as some other feature has all
        # pairwise cosine similarities >= 0.86, otherwise leave it False.
        redic = {}
        stalist = list(range(0, len(videoFeature)))
        for nu in stalist:
            dylist = list(range(0, len(videoFeature)))
            dylist.remove(nu)
            for nn in dylist:
                cosin_re = self.dps.cal_cosine(videoFeature[nu],
                                               videoFeature[nn])
                if sum(i < 0.86 for i in cosin_re) > 0:
                    redic[nu] = False
                else:
                    redic[nu] = True
                    break
        return redic
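
For orientation, a minimal usage sketch of how opvideo appears to be driven. The backbone name, checkpoint path, device, flow model and ids are placeholders; only the signatures defined in this commit are assumed.

from utils.detect import opvideo
from utils.embedding import DataProcessing

# Placeholders: the real flow model and checkpoint come from elsewhere in the project.
dps = DataProcessing('resnet18', 'model/now/model.pth', 'cpu')
op = opvideo(flowmodel=None, dps=dps)  # flowmodel is handed through to vanalysis()
redic = op.opFeature('demo-uuid', 2, 'clip.mp4')
# Stores clip.mp4 as id 2, then cross-compares the previously stored
# features 0 and 1 against the 0.86 cosine threshold.
print(redic)  # e.g. {0: True, 1: False}
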
63  utils/embedding.py  Normal file
@@ -0,0 +1,63 @@
from network.createNet import initnet
import cv2, torch
import numpy as np


class DataProcessing():

    def __init__(self, backbone, model_path, device):
        # Build the embedding network, load its weights and switch to eval mode.
        model = initnet(backbone)
        model.load_state_dict(torch.load(model_path))
        model.to(torch.device(device))
        model.eval()
        self.model = model
        self.device = device

    def cosin_metric(self, x1, x2):
        # Cosine similarity of two 1-D vectors; 100 is used as an error value
        # when the lengths do not match.
        if len(x1) != len(x2):
            return 100
        return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))

    def load_image(self, image):
        # Resize to 256x256, move channels first and add a batch dimension
        # so the result has shape (1, 3, 256, 256) in float32.
        #image = cv2.imread(image)
        if image is None:
            return None
        image = cv2.resize(image, (256, 256))
        image = image.transpose((2, 0, 1))
        image = image[np.newaxis, :, :, :]
        image = image.astype(np.float32, copy=False)
        return image

    def getFeatures(self, imgs):  # input: list of HxWxC numpy images
        # Run every image through the network and collect the embeddings.
        features = []
        assert (type(imgs) is list), 'Err input need list'
        for i, img in enumerate(imgs):
            image = self.load_image(img)
            if image is None:
                print('read image {} error'.format(i))
            else:
                data = torch.from_numpy(image)
                data = data.to(torch.device(self.device))
                output = self.model(data)
                output = output.data.cpu().numpy()
                features.append(output)
        return features  # return type is list

    def cal_cosine(self, t_features, m_features):
        # Pairwise cosine similarities between two feature collections
        # (lists or stacked ndarrays of embeddings).
        if not isinstance(m_features, (list, np.ndarray)):
            return 'Err m_features need list or ndarray'
        elif isinstance(t_features, (list, np.ndarray)):
            cosin_re = []
            for tf in t_features:
                for mf in m_features:
                    if type(mf) is list:
                        cosin_re.append(self.cosin_metric(tf.reshape(-1), mf))
                    else:
                        cosin_re.append(self.cosin_metric(tf.reshape(-1), mf.reshape(-1)))
        else:
            cosin_re = []
            for mf in m_features:
                cosin_re.append(self.cosin_metric(t_features.reshape(-1), mf.reshape(-1)))
        return cosin_re
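
As a sanity check on cosin_metric, the value it returns is the standard cosine similarity; a small worked example:

import numpy as np

x1 = np.array([1.0, 0.0, 1.0])
x2 = np.array([1.0, 1.0, 0.0])
# dot(x1, x2) / (|x1| * |x2|) = 1 / (sqrt(2) * sqrt(2)) = 0.5
print(np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2)))  # 0.5
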
55  utils/filter.py  Normal file
@@ -0,0 +1,55 @@
import cv2
from floder.config import cfg

def filt(video_path):
    #mask_path = '../../module/ieemoo-ai-search/model/now/ori_old.jpg'
    fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=20, detectShadows=False)
    capture = cv2.VideoCapture(video_path)
    ret, frame = capture.read()
    # Pick the mask that matches the clip orientation (landscape vs portrait).
    if frame.shape[0] < frame.shape[1]:
        maskimg = cv2.imread(cfg.maskImg)
    else:
        maskimg = cv2.imread(cfg.maskImg1)
    print('capture >>>> {}'.format(frame.shape))
    imglist = []
    re = False
    nn = 0
    while True:
        ret, frame = capture.read()
        nn += 1
        if not ret:
            break
        # Skip frames until motion is detected, then start collecting frames.
        if not re:
            re = img_filter(frame, maskimg, fgbg)
        else:
            imglist.append(frame)
            #cv2.imwrite('./imgs/tmpimgs/'+str(nn)+'.jpg', frame)
        if len(imglist) > 30:
            break
    return imglist  # --> list of frames


def img_filter(frame, maskimg, fgbg):
    # Return True when the largest moving contour inside the mask has a
    # plausible area (between 3000 and 50000 pixels at half resolution).
    dics = {}
    iouArea = 0

    height, width = frame.shape[:2]
    frame = cv2.resize(frame, (int(width/2), int(height/2)), interpolation=cv2.INTER_CUBIC)

    fgmask = fgbg.apply(frame)
    draw1 = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)[1]

    draw1 = cv2.bitwise_and(maskimg[:, :, 0], draw1)
    contours_m, hierarchy_m = cv2.findContours(draw1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours_m:
        dics[len(contour)] = contour
    if len(dics.keys()) > 0:
        cc = sorted(dics.keys())
        iouArea = cv2.contourArea(dics[cc[-1]])
    if iouArea > 3000 and iouArea < 50000:
        return True
    return False


if __name__ == '__main__':
    videoName = 'filterImg.mp4'
    filt(videoName)
0  utils/initialize.py  Normal file
24  utils/opfile.py  Normal file
@@ -0,0 +1,24 @@
import h5py, os
from floder.config import cfg


def writef(uuid, num_id, feature):
    # Store `feature` as dataset str(num_id) in <cfg.hFile>/<uuid>.h5
    # ('w' creates the file on the first write, 'a' appends afterwards).
    fname = os.sep.join([cfg.hFile, uuid + '.h5'])
    if not os.path.exists(fname):
        f = h5py.File(fname, 'w')
    else:
        f = h5py.File(fname, 'a')
    f[str(num_id)] = feature
    print('>>>>>>> feature has been written')
    f.close()


def readf(uuid, num_id):
    # Read the dataset str(num_id) back as a numpy array.
    fname = os.sep.join([cfg.hFile, uuid + '.h5'])
    f = h5py.File(fname, 'r')
    value = f[str(num_id)][:]
    f.close()
    return value


def removef(uuid):
    # Delete the per-uuid feature file.
    fname = os.sep.join([cfg.hFile, uuid + '.h5'])
    os.remove(fname)
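
A quick round-trip sketch for these helpers; the uuid and the feature array are made up, and cfg.hFile must point at an existing directory:

import numpy as np
from utils.opfile import writef, readf, removef

feature = np.random.rand(1, 512).astype(np.float32)  # placeholder embedding
writef('demo-uuid', 0, feature)   # creates <cfg.hFile>/demo-uuid.h5 with dataset "0"
restored = readf('demo-uuid', 0)  # reads dataset "0" back as an ndarray
assert np.allclose(feature, restored)
removef('demo-uuid')              # deletes the .h5 file again
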
0  utils/save.py  Normal file