diff --git a/testsingle.py b/testsingle.py
index a6f574f..38b3a52 100755
--- a/testsingle.py
+++ b/testsingle.py
@@ -13,12 +13,12 @@ import time
 #模型测试单张图片
 parser = argparse.ArgumentParser()
 parser.add_argument("--dataset", choices=["emptyJudge5"], default="emptyJudge5", help="Which dataset.")
-parser.add_argument("--img_size", default=320, type=int, help="Resolution size")
+parser.add_argument("--img_size", default=600, type=int, help="Resolution size")
 parser.add_argument('--split', type=str, default='overlap', help="Split method") # non-overlap
 parser.add_argument('--slide_step', type=int, default=12, help="Slide step for overlap split")
 parser.add_argument('--smoothing_value', type=float, default=0.0, help="Label smoothing value\n")
-parser.add_argument("--pretrained_model", type=str, default="../module/ieemoo-ai-isempty/model/now/emptyjudge5_checkpoint.bin", help="load pretrained model")
-#parser.add_argument("--pretrained_model", type=str, default="output/ieemooempty_vit_checkpoint.pth", help="load pretrained model") #使用自定义VIT
+#parser.add_argument("--pretrained_model", type=str, default="../module/ieemoo-ai-isempty/model/now/emptyjudge5_checkpoint.bin", help="load pretrained model")
+parser.add_argument("--pretrained_model", type=str, default="output/ieemooempty_vit_checkpoint.pth", help="load pretrained model") #使用自定义VIT
 args = parser.parse_args()
 
 args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@@ -40,7 +40,7 @@ if args.pretrained_model is not None:
 
 model.to(args.device)
 model.eval()
 
-test_transform = transforms.Compose([transforms.Resize((320, 320), Image.BILINEAR),
+test_transform = transforms.Compose([transforms.Resize((600, 600), Image.BILINEAR),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])