from utils.embedding import DataProcessing as dp
from floder.config import cfg
import cv2 as cv
from utils.filter import filt
from network.vanalysis_video import vanalysis, raft_init_model
import argparse
from utils.detect import opvideo

# Command-line options for the RAFT optical-flow checkpoint and runtime flags.
parser = argparse.ArgumentParser()
# parser.add_argument('--model', default='../module/ieemoo-ai-search/model/now/raft-things.pth', help="restore checkpoint")
parser.add_argument('--model', default='./checkpoint/raft-small.pth', help="restore checkpoint")
# parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--small', type=bool, default=True, help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
opt, unknown = parser.parse_known_args()

# Initialize the embedding backbone, the RAFT optical-flow model, and the video operator.
backbone = 'mobilevit'
modelPath = cfg.model_path
device = 'cuda'
dps = dp(backbone, modelPath, device)
flowmodel = raft_init_model(opt)
opv = opvideo(flowmodel, dps)

if __name__ == '__main__':
    video1 = './imgs/1.mp4'
    video2 = './imgs/2.mp4'
    video3 = './imgs/3.mp4'
    video4 = './imgs/4.mp4'

    # Add features from three videos under the key 'test',
    # then run opFeature on a fourth video and print the result.
    opv.addFeature('test', 0, video1)
    opv.addFeature('test', 1, video2)
    opv.addFeature('test', 2, video3)
    result = opv.opFeature('test', 3, video4)
    print('result>>>>> {}'.format(result))