update
predict.py (75 changed lines)
@@ -9,24 +9,22 @@ from sklearn.metrics import f1_score
 from PIL import Image
 from torchvision import transforms
 from models.modeling import VisionTransformer, CONFIGS
-import lightrise
 
 # Model prediction
 
 def parse_args():
     parser = argparse.ArgumentParser()
-    parser.add_argument("--img_size", default=600, type=int, help="Resolution size")
+    parser.add_argument("--img_size", default=448, type=int, help="Resolution size")
     parser.add_argument('--split', type=str, default='overlap', help="Split method") # non-overlap
-    parser.add_argument('--slide_step', type=int, default=2, help="Slide step for overlap split")
+    parser.add_argument('--slide_step', type=int, default=12, help="Slide step for overlap split")
     parser.add_argument('--smoothing_value', type=float, default=0.0, help="Label smoothing value\n")
-    parser.add_argument("--pretrained_model", type=str, default="../module/ieemoo-ai-isempty/model/new/ieemooempty_vit_checkpoint.pth", help="load pretrained model")
+    # parser.add_argument("--pretrained_model", type=str, default="output/ieemooempty_vit_checkpoint.pth", help="load pretrained model")  # use the custom ViT
+    parser.add_argument("--pretrained_model", type=str, default="output/emptyjudge5_checkpoint.bin", help="load pretrained model")
     return parser.parse_args()
 
 
 class Predictor(object):
     def __init__(self, args):
         self.args = args
-        self.args.device = torch.device("cuda")
+        self.args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print("self.args.device =", self.args.device)
         self.args.nprocs = torch.cuda.device_count()
 
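As a quick orientation (not part of the diff): with the new defaults above, the predictor can be driven roughly as in the sketch below. It assumes predict.py is importable from the working directory; "sample.jpg" is a placeholder path.

# Sketch only: exercises the updated defaults (448 px, slide_step 12, emptyjudge5 checkpoint).
from predict import parse_args, Predictor

args = parse_args()                   # CLI defaults from the hunk above
predictor = Predictor(args)           # builds the ViT and loads the checkpoint (CPU or GPU)
pred_cls, pred_score = predictor.normal_predict("sample.jpg")   # class id and its score
print(pred_cls, pred_score)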
@@ -34,7 +32,7 @@ class Predictor(object):
         self.num_classes = 0
         self.model = None
         self.prepare_model()
-        self.test_transform = transforms.Compose([transforms.Resize((600, 600), Image.BILINEAR),
+        self.test_transform = transforms.Compose([transforms.Resize((448, 448), Image.BILINEAR),
                                                   transforms.ToTensor(),
                                                   transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
 
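The Resize change above has to stay in sync with the new --img_size default of 448. A standalone sketch of what the preprocessing produces, using only standard PIL/torchvision calls ("sample.jpg" is a placeholder):

from PIL import Image
from torchvision import transforms

test_transform = transforms.Compose([
    transforms.Resize((448, 448), Image.BILINEAR),      # matches --img_size 448
    transforms.ToTensor(),                              # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),  # ImageNet stats
])
x = test_transform(Image.open("sample.jpg").convert("RGB"))   # tensor of shape [3, 448, 448]
batch = x.unsqueeze(0)                                        # shape [1, 3, 448, 448], as fed to the model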
@@ -42,14 +40,28 @@ class Predictor(object):
         config = CONFIGS["ViT-B_16"]
         config.split = self.args.split
         config.slide_step = self.args.slide_step
-        self.num_classes = 5
-        self.cls_dict = {0: "noemp", 1: "yesemp"}
 
+        model_name = os.path.basename(self.args.pretrained_model).replace("_checkpoint.bin", "")
+        print("use model_name: ", model_name)
+        if model_name.lower() == "emptyJudge5".lower():
+            self.num_classes = 5
+            self.cls_dict = {0: "noemp", 1: "yesemp", 2: "hard", 3: "fly", 4: "stack"}
+        elif model_name.lower() == "emptyJudge4".lower():
+            self.num_classes = 4
+            self.cls_dict = {0: "noemp", 1: "yesemp", 2: "hard", 3: "stack"}
+        elif model_name.lower() == "emptyJudge3".lower():
+            self.num_classes = 3
+            self.cls_dict = {0: "noemp", 1: "yesemp", 2: "hard"}
+        elif model_name.lower() == "emptyJudge2".lower():
+            self.num_classes = 2
+            self.cls_dict = {0: "noemp", 1: "yesemp"}
         self.model = VisionTransformer(config, self.args.img_size, zero_head=True, num_classes=self.num_classes, smoothing_value=self.args.smoothing_value)
 
         if self.args.pretrained_model is not None:
-            self.model = torch.load(self.args.pretrained_model,map_location='cpu')
-
+            if not torch.cuda.is_available():
+                pretrained_model = torch.load(self.args.pretrained_model, map_location=torch.device('cpu'))['model']
+                self.model.load_state_dict(pretrained_model)
+            else:
+                pretrained_model = torch.load(self.args.pretrained_model)['model']
+                self.model.load_state_dict(pretrained_model)
         self.model.to(self.args.device)
         self.model.eval()
 
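The new logic stops un-pickling a whole model object and instead loads only the checkpoint's 'model' state dict into the freshly built VisionTransformer, with map_location so CPU-only hosts still work. A condensed equivalent is sketched below; it folds the two branches into one by always passing map_location, and assumes (as the diff implies) that the checkpoint is a dict with a 'model' key and that `model` is the VisionTransformer constructed just above.

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
state = torch.load("output/emptyjudge5_checkpoint.bin", map_location=device)["model"]
model.load_state_dict(state)    # model: the freshly built VisionTransformer
model.to(device)
model.eval()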
@@ -61,7 +73,9 @@ class Predictor(object):
                 "Image file failed to read: {}".format(img_path))
         else:
             x = self.test_transform(img)
-            part_logits = self.model(x.unsqueeze(0).to(args.device))
+            if torch.cuda.is_available():
+                x = x.cuda()
+            part_logits = self.model(x.unsqueeze(0))
             probs = torch.nn.Softmax(dim=-1)(part_logits)
             topN = torch.argsort(probs, dim=-1, descending=True).tolist()
             clas_ids = topN[0][0]
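The lines after the forward pass turn the logits into a class id and a confidence. The same post-processing in isolation, on made-up logits:

import torch

part_logits = torch.tensor([[0.2, 3.1, -0.5, 0.0, 1.4]])         # fake logits for 5 classes
probs = torch.nn.Softmax(dim=-1)(part_logits)                    # per-class probabilities
topN = torch.argsort(probs, dim=-1, descending=True).tolist()    # class ids, best first
clas_ids = topN[0][0]                                            # top-1 class id (1 here)
pred_score = probs[0][clas_ids].item()                           # its probability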
@@ -75,12 +89,8 @@ if __name__ == "__main__":
 
     y_true = []
     y_pred = []
-    test_dir = "./emptyJudge5/images/"
+    test_dir = "/data/pfc/fineGrained/test_5cls"
     dir_dict = {"noemp":"0", "yesemp":"1", "hard": "2", "fly": "3", "stack": "4"}
-
-    # test_dir = "../emptyJudge2/images"
-    # dir_dict = {"noempty":"0", "empty":"1"}
-
     total = 0
     num = 0
     t0 = time.time()
@@ -96,19 +106,6 @@ if __name__ == "__main__":
             cur_pred, pred_score = predictor.normal_predict(cur_img_file)
 
             label = 0 if 2 == int(label) or 3 == int(label) or 4 == int(label) else int(label)
-
-            riseresult = lightrise.riseempty(Image.open(cur_img_file))
-            if(label==1):
-                if(int(riseresult["rst_cls"])==1):
-                    label=1
-                else:
-                    label=0
-            # else:
-            #     if(riseresult["rst_cls"]==0):
-            #         label=0
-            #     else:
-            #         label=1
-
             cur_pred = 0 if 2 == int(cur_pred) or 3 == int(cur_pred) or 4 == int(cur_pred) else int(cur_pred)
             y_true.append(int(label))
             y_pred.append(int(cur_pred))
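For scoring, both the folder label and the prediction are collapsed to a binary empty/not-empty decision: classes 2 (hard), 3 (fly) and 4 (stack) count as 0 (noemp). The two conditional expressions kept above amount to this small helper:

def to_binary(cls_id):
    # Fold the 5-way labels {0..4} down to noemp (0) / yesemp (1).
    return 0 if int(cls_id) in (2, 3, 4) else int(cls_id)

assert to_binary(3) == 0 and to_binary(1) == 1 and to_binary(0) == 0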
@@ -128,18 +125,6 @@ if __name__ == "__main__":
     print(rst_C)
     print(rst_f1)
 
-    '''
-    All datasets:
-
-    The cast of time is :160.738966 seconds
-    The classification accuracy is 0.986836
-    [[4923 58]
-    [ 34 1974]]
-    0.9839851634589902
-
-    '''
-
-
     '''
     test_imgs: yesemp=145, noemp=453 (large images)
 
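The printed rst_C and rst_f1 are presumably a confusion matrix and an F1 score over the collected y_true / y_pred lists. With sklearn.metrics (f1_score is already imported at the top of the file; confusion_matrix is an assumption), the tail of the script would look roughly like this sketch with placeholder data:

import numpy as np
from sklearn.metrics import confusion_matrix, f1_score

y_true = [1, 0, 1, 1, 0]      # placeholder labels
y_pred = [1, 0, 0, 1, 0]      # placeholder predictions

acc = float(np.mean(np.array(y_true) == np.array(y_pred)))
rst_C = confusion_matrix(y_true, y_pred)   # 2x2 counts, like the [[4923 58] [34 1974]] block above
rst_f1 = f1_score(y_true, y_pred)          # binary F1, like the 0.9839... value above
print("The classification accuracy is %f" % acc)
print(rst_C)
print(rst_f1)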