import sys
sys.path.append('RAFT/core')
import argparse
import glob, cv2, os, pdb, time
import numpy as np
import torch
from PIL import Image
from raft import RAFT
from RAFT.core.utils import flow_viz
from RAFT.core.utils.utils import InputPadder
from utils.tools import EvaluteMap, ManagingFeature
from utils.config import cfg
from utils.updateObs import Addimg_content
from utils.retrieval_feature import AntiFraudFeatureDataset

DEVICE = 'cuda'
pre_area = 0
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))   # structuring element for dilation
kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))  # structuring element for erosion


def load_image(imfile):
    # `imfile` here is an in-memory frame, not a path
    #img = np.array(Image.open(imfile)).astype(np.uint8)
    img = np.array(imfile).astype(np.uint8)
    img = torch.from_numpy(img).permute(2, 0, 1).float()
    return img[None].to(DEVICE)


def viz(img, flo):
    img = img[0].permute(1, 2, 0).cpu().numpy()
    flo = flo[0].permute(1, 2, 0).cpu().numpy()
    # convert the flow field to an RGB visualization image
    flo = flow_viz.flow_to_image(flo)
    return flo


def raft_init_model(args):
    model = torch.nn.DataParallel(RAFT(args))
    model.load_state_dict(torch.load(args.model))
    model = model.module
    model.to(DEVICE)
    model.eval()
    return model


def analysis_video(model, video_path, result_path, uuid_barcode, maskpath=None,
                   net=None, transform=None, ms=None, match=True):
    imfile1, imfile2 = None, None
    affd = AntiFraudFeatureDataset()
    barcode = uuid_barcode.split('_')[-1]
    search_r = ManagingFeature().getfeature(barcode)
    #print('search_r>>>>>>>>', len(search_r))
    ori_mask = cv2.imread(maskpath, 0)
    nn, nu, result = 0, 1, None
    Result = '03'
    img_dic = {}
    ex_ocrList, resultList = [], []
    flag = True
    fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=20, detectShadows=False)
    #oriimg = cv2.imread(cfg.fgbgmask)
    with torch.no_grad():
        capture = cv2.VideoCapture(video_path)
        ret, frame = capture.read()
        # [TRUNCATED] The per-frame loop (frame filtering, RAFT inference,
        # target extraction and matching) is missing from the source between
        # a condition on frame.shape[0] and the fragment below. The surviving
        # tail, apparently a stop-condition update, is preserved verbatim as
        # a comment because its enclosing scope is lost:
        #
        # ...cfg.NUM_RAFT:
        #     flag = True
        # if nn > 100 and nu > 2:
        #     flag = True
        # else:
        #     if result is not None:
        #         nu += 1
        #     if nu > cfg.NUM_RAFT:
        #         flag = True
        #     if nn > 100 and nu > 2:
        #         flag = True
        # return flag, nu, Result


def get_target(path, img, ori_img, nu, ori_mask, uuid_barcode, MASKIMG):
    global pre_area
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # near-white pixels are background; invert the threshold to get the target mask
    ret, mask = cv2.threshold(img, 249, 255, cv2.THRESH_BINARY)
    mask_max_area, mask_max_contour = 0, 0
    mask = cv2.bitwise_not(mask)
    mask_image = np.zeros((ori_img.shape[0], ori_img.shape[1], 1), np.uint8)
    # OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
    if cv2.__version__.split('.')[0] == '3':
        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    else:
        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if len(contours) > 100:
        return None, '_'
    for contour in contours:
        mask_area_now = cv2.contourArea(contour)
        if mask_area_now > mask_max_area:
            mask_max_area = mask_area_now
            mask_max_contour = contour
    if mask_max_area == 0:
        return None, '_'
    # mask_max_area is the area of the target region
    (x, y, w, h) = cv2.boundingRect(mask_max_contour)
    if (w * h) / (img.shape[0] * img.shape[1]) > 0.80:
        return None, '_'
    if min(w, h) < 100 or max(w, h) > 1000:
        return None, '_'
    coordination = [x, y, x + w, y + h]
    mask_image = cv2.fillPoly(mask_image, [mask_max_contour], (255))
    # reject frames whose target area jumps more than 40% from the previous frame
    if pre_area == 0:
        pre_area = mask_max_area
        return None, '_'
    else:
        if abs(mask_max_area - pre_area) / pre_area > 0.4:
            pre_area = mask_max_area
            #print('abs:', abs(mask_max_area - pre_area) / pre_area)
            return None, '_'
        else:
            pre_area = mask_max_area
    A, B, C = mask_image, mask_image, mask_image
    mask_image = cv2.merge([A, B, C])
    # intersect with the reference mask to remove interference outside the target box
    if not get_iou_ratio(mask_image, MASKIMG):
        return None, '_'
    show = cv2.bitwise_and(ori_img, mask_image)
    #show = show[coordination[1]:coordination[3], coordination[0]:coordination[2]]
    show = ori_img[coordination[1]:coordination[3], coordination[0]:coordination[2]]
    #cv2.imwrite(os.sep.join([cfg.Ocrimg, str(nu - 1) + '_' + uuid_barcode + '.jpg']), show)
    return show, coordination


def get_iou_ratio(oimg, MASKIMG):
    mimg = cv2.imread(MASKIMG)
    iimg = cv2.bitwise_and(oimg, mimg)
    iimgarea = get_area(iimg)
    oimgarea = get_area(oimg)
    # guard against a zero-area mask before dividing
    if oimgarea == 0 or iimgarea / oimgarea < 0.1:
        return False
    return True


def get_area(img):
    kernel = np.ones((3, 3), dtype=np.uint8)
    # `iterations` must be passed by keyword: the third positional argument
    # of cv2.dilate/cv2.erode is `dst`, not the iteration count
    img = cv2.dilate(img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=1)
    maxcontour, nu = 0, 0
    contours, _ = cv2.findContours(img[:, :, 1], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if len(contours) == 0:
        return 0
    # pick the contour with the most points and return its area
    for i in range(len(contours)):
        if maxcontour < len(contours[i]):
            maxcontour = len(contours[i])
            nu = i
    area = cv2.contourArea(contours[nu])
    return area


def img_filter(frame, oriimg, fgbg, nn):
    dics = {}
    iouArea = 0
    frame = cv2.GaussianBlur(frame, (5, 5), 0)
    height, width = frame.shape[:2]
    frame = cv2.resize(frame, (int(width / 2), int(height / 2)), interpolation=cv2.INTER_CUBIC)
    # compute the foreground mask
    fgmask = fgbg.apply(frame)
    draw1 = cv2.threshold(fgmask, 25, 255, cv2.THRESH_BINARY)[1]
    draw1 = cv2.erode(draw1, kernel1, iterations=2)
    draw1 = cv2.dilate(draw1, kernel, iterations=1)
    if nn == 2:
        return True
    draw1 = cv2.bitwise_and(oriimg[:, :, 0], draw1)
    contours_m, hierarchy_m = cv2.findContours(draw1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours_m:
        dics[len(contour)] = contour
    if len(dics.keys()) > 0:
        cc = sorted(dics.keys())
        iouArea = cv2.contourArea(dics[cc[-1]])
    #if iouArea > 10000 and iouArea < 40000:
    if 3000 < iouArea < 50000:
        return False
    return True


if __name__ == '__main__':
    # raft_init_model() was called without arguments in the original source,
    # but it requires an argparse-style namespace. The flags below mirror the
    # standard RAFT demo arguments; the checkpoint path is a placeholder
    # assumption.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='models/raft-things.pth', help='restore checkpoint (placeholder path)')
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
    args = parser.parse_args()
    model = raft_init_model(args)
    from utils.tools import createNet
    net, transform, ms = createNet()
    video_path = '../data/videos/20220625-094651_37dd99b0-520d-457b-8615-efdb7f53b5b4_6907992825762.mp4'
    uuid_barcode = '6907992825762'
    analysis = analysis_video(model=model, video_path=video_path, result_path='',
                              uuid_barcode=uuid_barcode, maskpath=None,
                              net=net, transform=transform, ms=ms)
    # analysis_video(model, video_path, result_path)
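
# ---------------------------------------------------------------------------
# The RAFT inference step that the truncated loop in analysis_video() most
# likely performed is sketched below for reference. This is a minimal,
# hypothetical reconstruction (the name raft_flow_sketch and its placement
# are assumptions, not part of the original file); it only wires together
# the helpers already defined above and the standard RAFT test-mode call.
# ---------------------------------------------------------------------------
def raft_flow_sketch(model, frame1, frame2):
    """Run RAFT on two consecutive BGR frames and return the flow visualization."""
    with torch.no_grad():
        image1 = load_image(frame1)         # HxWx3 uint8 -> 1x3xHxW float tensor on DEVICE
        image2 = load_image(frame2)
        padder = InputPadder(image1.shape)  # pad both tensors so H and W are divisible by 8
        image1, image2 = padder.pad(image1, image2)
        # in test_mode RAFT returns (low-res flow, upsampled flow)
        _, flow_up = model(image1, image2, iters=20, test_mode=True)
        return viz(image1, flow_up)         # RGB image of the flow field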