From 5c21167991d3f4f1f156acdec66cd466cd98d97c Mon Sep 17 00:00:00 2001
From: Brainway
Date: Wed, 26 Oct 2022 15:43:05 +0000
Subject: [PATCH] update testsingle.py.

---
 testsingle.py | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/testsingle.py b/testsingle.py
index 817bea7..a6f574f 100755
--- a/testsingle.py
+++ b/testsingle.py
@@ -12,13 +12,13 @@ import time
 # Test the model on a single image
 parser = argparse.ArgumentParser()
-parser.add_argument("--dataset", choices=["emptyJudge2"], default="emptyJudge2", help="Which dataset.")
-parser.add_argument("--img_size", default=600, type=int, help="Resolution size")
+parser.add_argument("--dataset", choices=["emptyJudge5"], default="emptyJudge5", help="Which dataset.")
+parser.add_argument("--img_size", default=320, type=int, help="Resolution size")
 parser.add_argument('--split', type=str, default='overlap', help="Split method")  # non-overlap
-parser.add_argument('--slide_step', type=int, default=2, help="Slide step for overlap split")
+parser.add_argument('--slide_step', type=int, default=12, help="Slide step for overlap split")
 parser.add_argument('--smoothing_value', type=float, default=0.0, help="Label smoothing value\n")
-#parser.add_argument("--pretrained_model", type=str, default="../module/ieemoo-ai-isempty/model/now/emptyjudge5_checkpoint.bin", help="load pretrained model")
-parser.add_argument("--pretrained_model", type=str, default="output/ieemooempty_vit_checkpoint.pth", help="load pretrained model")  # use the custom ViT
+parser.add_argument("--pretrained_model", type=str, default="../module/ieemoo-ai-isempty/model/now/emptyjudge5_checkpoint.bin", help="load pretrained model")
+#parser.add_argument("--pretrained_model", type=str, default="output/ieemooempty_vit_checkpoint.pth", help="load pretrained model")  # use the custom ViT
 args = parser.parse_args()
 args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -30,17 +30,17 @@ config.split = args.split
 config.slide_step = args.slide_step
 num_classes = 5
-cls_dict = {0: "noemp", 1: "yesemp"}
+cls_dict = {0: "noemp", 1: "yesemp", 2: "hard", 3: "fly", 4: "stack"}
 model = None
 #model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes, smoothing_value=args.smoothing_value)
 if args.pretrained_model is not None:
-    model = torch.load(args.pretrained_model)  # our own pretrained model
+    model = torch.load(args.pretrained_model, map_location=torch.device('cpu'))  # our own pretrained model
 model.to(args.device)
 model.eval()
-test_transform = transforms.Compose([transforms.Resize((600, 600), Image.BILINEAR),
+test_transform = transforms.Compose([transforms.Resize((320, 320), Image.BILINEAR),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
@@ -71,6 +71,14 @@ print("Prediction Label\n")
 for idx in top5[0, :5]:
     print(f'{probs[0, idx.item()]:.5f} : {cls_dict[idx.item()]}', end='\n')
+clas_ids = top5[0][0]
+clas_ids = 0 if 0 == int(clas_ids) or 2 == int(clas_ids) or 3 == int(clas_ids) else 1
+print("cur_img result: class id: %d, score: %0.3f" % (clas_ids, probs[0, clas_ids].item()))
+result = {}
+result["success"] = "true"
+result["rst_cls"] = str(clas_ids)
+print(result)
+
 endtime = time.process_time()
 print("Time cost:" + str(endtime - startime))
 # Evaluating one image takes about 2.8 seconds
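For readers skimming the diff, below is a minimal standalone sketch of the post-processing step the patch adds, which collapses the 5-way prediction into a binary result (classes 0, 2, 3 map to 0; classes 1 and 4 map to 1). The tensor shape and probability values are illustrative only, not taken from the repository.

    import torch

    # Illustrative softmax output over the five classes {noemp, yesemp, hard, fly, stack};
    # shape (1, 5), values are made up for this example.
    probs = torch.tensor([[0.05, 0.70, 0.10, 0.10, 0.05]])
    top5 = torch.argsort(probs, dim=-1, descending=True)  # class ids sorted by descending score

    clas_ids = int(top5[0][0])                    # best 5-way class id (1 == "yesemp" here)
    clas_ids = 0 if clas_ids in (0, 2, 3) else 1  # collapse to the binary result used in the patch

    result = {"success": "true", "rst_cls": str(clas_ids)}
    print(result)  # {'success': 'true', 'rst_cls': '1'}

The other functional change is passing map_location=torch.device('cpu') to torch.load, so a GPU-trained checkpoint can be deserialized on a CPU-only machine before model.to(args.device) moves it to the selected device.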