Update detecttracking
Binary file not shown.
87
detecttracking/contrast/feat_extract/config.py
Normal file
@@ -0,0 +1,87 @@
# import torch
# import torchvision.transforms as T
#
#
# class Config:
#     # network settings
#     backbone = 'resnet18'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5, PPLCNET_x2_5]
#     metric = 'arcface'  # [cosface, arcface]
#     cbam = True
#     embedding_size = 256
#     drop_ratio = 0.5
#     img_size = 224
#
#     batch_size = 8
#
#     # data preprocess
#     # input_shape = [1, 128, 128]
#     """transforms.RandomCrop(size),
#     transforms.RandomVerticalFlip(p=0.5),
#     transforms.RandomHorizontalFlip(),
#     RandomRotate(15, 0.3),
#     # RandomGaussianBlur()"""
#
#     train_transform = T.Compose([
#         T.ToTensor(),
#         T.Resize((img_size, img_size)),
#         # T.RandomCrop(img_size),
#         # T.RandomHorizontalFlip(p=0.5),
#         T.RandomRotation(180),
#         T.ColorJitter(brightness=0.5),
#         T.ConvertImageDtype(torch.float32),
#         T.Normalize(mean=[0.5], std=[0.5]),
#     ])
#     test_transform = T.Compose([
#         T.ToTensor(),
#         T.Resize((img_size, img_size)),
#         T.ConvertImageDtype(torch.float32),
#         T.Normalize(mean=[0.5], std=[0.5]),
#     ])
#
#     # dataset
#     train_root = './data/2250_train/train'  # dataset that went through one initial screening pass
#     # train_root = './data/0612_train/train'
#     test_root = "./data/2250_train/val/"
#     # test_root = "./data/0612_train/val"
#     test_list = "./data/2250_train/val_pair.txt"
#
#     test_group_json = "./2250_train/cross_same_0508.json"
#
#
#     # test_list = "./data/test_data_100/val_pair.txt"
#
#     # training settings
#     checkpoints = "checkpoints/resnet18_0613/"  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3]
#     restore = False
#     # restore_model = "checkpoints/renet18_2250_0315/best_resnet18_2250_0315.pth"  # best_resnet18_1491_0306.pth
#     restore_model = "checkpoints/resnet18_0515/best.pth"  # best_resnet18_1491_0306.pth
#
#     # test_model = "checkpoints/renet18_2250_0314/best_resnet18_2250_0314.pth"
#     testbackbone = 'resnet18'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5]
#     test_val = "D:/比对/cl"
#     # test_val = "./data/test_data_100"
#
#     # test_model = "checkpoints/zhanting_res_801.pth"
#     test_model = "checkpoints/resnet18_0515/v11.pth"
#
#
#
#     train_batch_size = 512  # 256
#     test_batch_size = 256  # 256
#
#     epoch = 300
#     optimizer = 'sgd'  # ['sgd', 'adam']
#     lr = 1.5e-2  # 1e-2
#     lr_step = 5  # 10
#     lr_decay = 0.95  # 0.98
#     weight_decay = 5e-4
#     loss = 'cross_entropy'  # ['focal_loss', 'cross_entropy']
#     # device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
#     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#
#     pin_memory = True  # if memory is large, set it True to speed up a bit
#     num_workers = 4  # dataloader
#
#     group_test = True
#
# config = Config()
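The whole module above is checked in commented out, so importing it currently yields an empty module; the pattern is a class-attribute config with a module-level singleton (`config = Config()`). A minimal sketch of how such a singleton is consumed, assuming the block is uncommented and the file is importable as `config`:

# Hedged sketch: assumes the config.py above has been uncommented.
from PIL import Image
import numpy as np
from config import config as conf  # module-level singleton, config = Config()

print(conf.backbone, conf.embedding_size)  # e.g. resnet18 256

# Reuse the eval-time preprocessing on a dummy RGB image.
img = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
tensor = conf.test_transform(img)
print(tensor.shape)  # torch.Size([3, 224, 224])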
547
detecttracking/contrast/feat_extract/inference.py
Normal file
@@ -0,0 +1,547 @@
# -*- coding: utf-8 -*-
"""

@author: LiChen
"""
import numpy as np
import torch
from pathlib import Path
from utils.config import config as cfg

curpath = Path(__file__).resolve().parents[0]


class FeatsInterface:
    def __init__(self, resnetModel=None):
        self.device = cfg.device
        self.transform = cfg.test_transform
        self.batch_size = cfg.batch_size
        self.embedding_size = cfg.embedding_size
        assert resnetModel is not None, "resnetModel is None"
        self.model = resnetModel
        print(f"Model type: {type(self.model)}")

    def inference(self, images, detections=None):
        '''
        If the input is BGR, it must be converted to RGB first.
        '''
        if isinstance(images, np.ndarray):
            imgs, features = self.inference_image(images, detections)
            return imgs, features

        batch_patches = []
        patches = []
        for i, img in enumerate(images):
            img = img.copy()
            patch = self.transform(img)
            if str(self.device) != "cpu":
                # patch = patch.to(device=self.device).half()
                patch = patch.to(device=self.device)
            else:
                patch = patch.to(device=self.device)

            patches.append(patch)
            if (i + 1) % self.batch_size == 0:
                patches = torch.stack(patches, dim=0)
                batch_patches.append(patches)
                patches = []

        if len(patches):
            patches = torch.stack(patches, dim=0)
            batch_patches.append(patches)

        features = np.zeros((0, self.embedding_size))
        for patches in batch_patches:
            pred = self.model(patches)
            pred[torch.isinf(pred)] = 1.0
            feat = pred.cpu().data.numpy()
            features = np.vstack((features, feat))
        return features

    def inference_image(self, image, detections):
        H, W, _ = np.shape(image)

        batch_patches = []
        patches = []
        imgs = []
        for d in range(np.size(detections, 0)):
            tlbr = detections[d, :4].astype(np.int_)
            tlbr[0] = max(0, tlbr[0])
            tlbr[1] = max(0, tlbr[1])
            tlbr[2] = min(W - 1, tlbr[2])
            tlbr[3] = min(H - 1, tlbr[3])
            img = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]

            imgs.append(img)

            img1 = img[:, :, ::-1].copy()  # the model expects RGB inputs
            patch = self.transform(img1)

            # patch = patch.to(device=self.device).half()
            if str(self.device) != "cpu":
                # patch = patch.to(device=self.device).half()
                patch = patch.to(device=self.device)
            else:
                patch = patch.to(device=self.device)

            patches.append(patch)
            if (d + 1) % self.batch_size == 0:
                patches = torch.stack(patches, dim=0)
                batch_patches.append(patches)
                patches = []

        if len(patches):
            patches = torch.stack(patches, dim=0)
            batch_patches.append(patches)

        features = np.zeros((0, self.embedding_size))
        for patches in batch_patches:
            pred = self.model(patches)
            pred[torch.isinf(pred)] = 1.0
            feat = pred.cpu().data.numpy()
            features = np.vstack((features, feat))

        return imgs, features


# def unique_image(pair_list) -> set:
#     """Return unique image path in pair_list.txt"""
#     with open(pair_list, 'r') as fd:
#         pairs = fd.readlines()
#     unique = set()
#     for pair in pairs:
#         id1, id2, _ = pair.split()
#         unique.add(id1)
#         unique.add(id2)
#     return unique
#
#
# def group_image(images: set, batch) -> list:
#     """Group image paths by batch size"""
#     images = list(images)
#     size = len(images)
#     res = []
#     for i in range(0, size, batch):
#         end = min(batch + i, size)
#         res.append(images[i: end])
#     return res
#
#
# def _preprocess(images: list, transform) -> torch.Tensor:
#     res = []
#     for img in images:
#         im = Image.open(img)
#         im = transform(im)
#         res.append(im)
#     # data = torch.cat(res, dim=0)  # shape: (batch, 128, 128)
#     # data = data[:, None, :, :]  # shape: (batch, 1, 128, 128)
#     data = torch.stack(res)
#     return data
#
#
# def test_preprocess(images: list, transform) -> torch.Tensor:
#     res = []
#     for img in images:
#         im = Image.open(img)
#         im = transform(im)
#         res.append(im)
#     # data = torch.cat(res, dim=0)  # shape: (batch, 128, 128)
#     # data = data[:, None, :, :]  # shape: (batch, 1, 128, 128)
#     data = torch.stack(res)
#     return data
#
#
# def featurize(images: list, transform, net, device, train=False) -> dict:
#     """featurize each image and save into a dictionary
#     Args:
#         images: image paths
#         transform: test transform
#         net: pretrained model
#         device: cpu or cuda
#     Returns:
#         Dict (key: imagePath, value: feature)
#     """
#     if train:
#         data = _preprocess(images, transform)
#         data = data.to(device)
#         net = net.to(device)
#         with torch.no_grad():
#             features = net(data)
#         res = {img: feature for (img, feature) in zip(images, features)}
#     else:
#         data = test_preprocess(images, transform)
#         data = data.to(device)
#         net = net.to(device)
#         with torch.no_grad():
#             features = net(data)
#         res = {img: feature for (img, feature) in zip(images, features)}
#     return res
#
#
# # def inference_image(images: list, transform, net, device, bs=16, embedding_size=256) -> dict:
# #     batch_patches = []
# #     patches = []
# #     for d, img in enumerate(images):
# #         img = Image.open(img)
# #         patch = transform(img)
#
# #         if str(device) != "cpu":
# #             patch = patch.to(device).half()
# #         else:
# #             patch = patch.to(device)
#
# #         patches.append(patch)
# #         if (d + 1) % bs == 0:
# #             patches = torch.stack(patches, dim=0)
# #             batch_patches.append(patches)
# #             patches = []
#
# #     if len(patches):
# #         patches = torch.stack(patches, dim=0)
# #         batch_patches.append(patches)
#
# #     features = np.zeros((0, embedding_size), dtype=np.float32)
# #     for patches in batch_patches:
# #         pred = net(patches)
# #         pred[torch.isinf(pred)] = 1.0
# #         feat = pred.cpu().data.numpy()
# #         features = np.vstack((features, feat))
#
#
# #     return features
#
#
# def featurize_1(images: list, transform, net, device, train=False) -> dict:
#     """featurize each image and save into a dictionary
#     Args:
#         images: image paths
#         transform: test transform
#         net: pretrained model
#         device: cpu or cuda
#     Returns:
#         Dict (key: imagePath, value: feature)
#     """
#
#     data = test_preprocess(images, transform)
#     data = data.to(device)
#     net = net.to(device)
#     with torch.no_grad():
#         features = net(data).data.numpy()
#
#     return features
#
#
# def cosin_metric(x1, x2):
#     return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))
#
#
# def threshold_search(y_score, y_true):
#     y_score = np.asarray(y_score)
#     y_true = np.asarray(y_true)
#     best_acc = 0
#     best_th = 0
#     for i in range(len(y_score)):
#         th = y_score[i]
#         y_test = (y_score >= th)
#         acc = np.mean((y_test == y_true).astype(int))
#         if acc > best_acc:
#             best_acc = acc
#             best_th = th
#     return best_acc, best_th
#
#
# def showgrid(recall, recall_TN, PrecisePos, PreciseNeg):
#     x = np.linspace(start=-1.0, stop=1.0, num=50, endpoint=True).tolist()
#     plt.figure(figsize=(10, 6))
#     plt.plot(x, recall, color='red', label='recall')
#     plt.plot(x, recall_TN, color='black', label='recall_TN')
#     plt.plot(x, PrecisePos, color='blue', label='PrecisePos')
#     plt.plot(x, PreciseNeg, color='green', label='PreciseNeg')
#     plt.legend()
#     plt.xlabel('threshold')
#     # plt.ylabel('Similarity')
#     plt.grid(True, linestyle='--', alpha=0.5)
#     plt.savefig('accuracy_recall_grid.png')
#     plt.show()
#     plt.close()
#
#
# def compute_accuracy_recall(score, labels):
#     th = 0.1
#     squence = np.linspace(-1, 1, num=50)
#     # squence = [0.4]
#     recall, PrecisePos, PreciseNeg, recall_TN = [], [], [], []
#     for th in squence:
#         t_score = (score > th)
#         t_labels = (labels == 1)
#         # print(t_score)
#         # print(t_labels)
#         TP = np.sum(np.logical_and(t_score, t_labels))
#         FN = np.sum(np.logical_and(np.logical_not(t_score), t_labels))
#         f_score = (score < th)
#         f_labels = (labels == 0)
#         TN = np.sum(np.logical_and(f_score, f_labels))
#         FP = np.sum(np.logical_and(np.logical_not(f_score), f_labels))
#         print("Threshold:{} TP:{},FP:{},TN:{},FN:{}".format(th, TP, FP, TN, FN))
#
#         PrecisePos.append(0 if (TP + FP) == 0 else TP / (TP + FP))  # guard against division by zero
#         PreciseNeg.append(0 if TN == 0 else TN / (TN + FN))
#         recall.append(0 if TP == 0 else TP / (TP + FN))
#         recall_TN.append(0 if TN == 0 else TN / (TN + FP))
#     showgrid(recall, recall_TN, PrecisePos, PreciseNeg)
#
#
# def compute_accuracy(feature_dict, pair_list, test_root):
#     with open(pair_list, 'r') as f:
#         pairs = f.readlines()
#
#     similarities = []
#     labels = []
#     for pair in pairs:
#         img1, img2, label = pair.split()
#         img1 = osp.join(test_root, img1)
#         img2 = osp.join(test_root, img2)
#         feature1 = feature_dict[img1].cpu().numpy()
#         feature2 = feature_dict[img2].cpu().numpy()
#         label = int(label)
#
#         similarity = cosin_metric(feature1, feature2)
#         similarities.append(similarity)
#         labels.append(label)
#
#     accuracy, threshold = threshold_search(similarities, labels)
#     # print('similarities >> {}'.format(similarities))
#     # print('labels >> {}'.format(labels))
#     compute_accuracy_recall(np.array(similarities), np.array(labels))
#     return accuracy, threshold


# def deal_group_pair(pairList1, pairList2):
#     allsimilarity = []
#     one_similarity = []
#     for pair1 in pairList1:
#         for pair2 in pairList2:
#             similarity = cosin_metric(pair1.cpu().numpy(), pair2.cpu().numpy())
#             one_similarity.append(similarity)
#         allsimilarity.append(max(one_similarity))  # maximum
#         # allsimilarity.append(sum(one_similarity) / len(one_similarity))  # mean
#         # allsimilarity.append(statistics.median(one_similarity))  # median
#     # print(allsimilarity)
#     # print(labels)
#     return allsimilarity


# def compute_group_accuracy(content_list_read):
#     allSimilarity, allLabel = [], []
#     for data_loaded in content_list_read:
#         one_group_list = []
#         for i in range(2):
#             images = [osp.join(conf.test_val, img) for img in data_loaded[i]]
#             group = group_image(images, conf.test_batch_size)
#             d = featurize(group[0], conf.test_transform, model, conf.device)
#             one_group_list.append(d.values())
#         similarity = deal_group_pair(one_group_list[0], one_group_list[1])
#         allLabel.append(data_loaded[-1])
#         allSimilarity.extend(similarity)
#     # print(allSimilarity)
#     # print(allLabel)
#     return allSimilarity, allLabel


# def compute_contrast_accuracy(content_list_read):
#     npairs = 50
#
#     same_folder_pairs = content_list_read['same_folder_pairs']
#     cross_folder_pairs = content_list_read['cross_folder_pairs']
#
#     npairs = min((len(same_folder_pairs), len(cross_folder_pairs)))
#
#     Encoder = FeatsInterface(conf)
#
#     same_pairs = same_folder_pairs[:npairs]
#     cross_pairs = cross_folder_pairs[:npairs]
#
#     same_pairs_similarity = []
#     for i in range(len(same_pairs)):
#         images_a = [osp.join(conf.test_val, img) for img in same_pairs[i][0]]
#         images_b = [osp.join(conf.test_val, img) for img in same_pairs[i][1]]
#
#         feats_a = Encoder.inference(images_a)
#         feats_b = Encoder.inference(images_b)
#         # matrix = 1 - np.maximum(0.0, cdist(feats_a, feats_b, 'cosine'))
#         matrix = 1 - cdist(feats_a, feats_b, 'cosine')
#
#         feats_am = np.mean(feats_a, axis=0, keepdims=True)
#         feats_bm = np.mean(feats_b, axis=0, keepdims=True)
#         matrixm = 1 - np.maximum(0.0, cdist(feats_am, feats_bm, 'cosine'))
#
#         same_pairs_similarity.append(np.mean(matrix))
#
#         '''Save image pairs with the same barcode'''
#         # foldi = os.path.join('./result/same', f'{i}')
#         # if os.path.exists(foldi):
#         #     shutil.rmtree(foldi)
#         #     os.makedirs(foldi)
#         # else:
#         #     os.makedirs(foldi)
#         # for ipt in range(len(images_a)):
#         #     source_path = images_a[ipt]
#         #     destination_path = os.path.join(foldi, f'a_{ipt}.png')
#         #     shutil.copy2(source_path, destination_path)
#         # for ipt in range(len(images_b)):
#         #     source_path = images_b[ipt]
#         #     destination_path = os.path.join(foldi, f'b_{ipt}.png')
#         #     shutil.copy2(source_path, destination_path)
#
#     cross_pairs_similarity = []
#     for i in range(len(cross_pairs)):
#         images_a = [osp.join(conf.test_val, img) for img in cross_pairs[i][0]]
#         images_b = [osp.join(conf.test_val, img) for img in cross_pairs[i][1]]
#
#         feats_a = Encoder.inference(images_a)
#         feats_b = Encoder.inference(images_b)
#         # matrix = 1 - np.maximum(0.0, cdist(feats_a, feats_b, 'cosine'))
#         matrix = 1 - cdist(feats_a, feats_b, 'cosine')
#
#         feats_am = np.mean(feats_a, axis=0, keepdims=True)
#         feats_bm = np.mean(feats_b, axis=0, keepdims=True)
#         matrixm = 1 - np.maximum(0.0, cdist(feats_am, feats_bm, 'cosine'))
#
#         cross_pairs_similarity.append(np.mean(matrix))
#
#         '''Save image pairs with different barcodes'''
#         # foldi = os.path.join('./result/cross', f'{i}')
#         # if os.path.exists(foldi):
#         #     shutil.rmtree(foldi)
#         #     os.makedirs(foldi)
#         # else:
#         #     os.makedirs(foldi)
#         # for ipt in range(len(images_a)):
#         #     source_path = images_a[ipt]
#         #     destination_path = os.path.join(foldi, f'a_{ipt}.png')
#         #     shutil.copy2(source_path, destination_path)
#         # for ipt in range(len(images_b)):
#         #     source_path = images_b[ipt]
#         #     destination_path = os.path.join(foldi, f'b_{ipt}.png')
#         #     shutil.copy2(source_path, destination_path)
#
#     Thresh = np.linspace(-0.2, 1, 100)
#
#     Same = np.array(same_pairs_similarity)
#     Cross = np.array(cross_pairs_similarity)
#
#     fig, axs = plt.subplots(2, 1)
#     axs[0].hist(Same, bins=60, edgecolor='black')
#     axs[0].set_xlim([-0.2, 1])
#     axs[0].set_title('Same Barcode')
#
#     axs[1].hist(Cross, bins=60, edgecolor='black')
#     axs[1].set_xlim([-0.2, 1])
#     axs[1].set_title('Cross Barcode')
#
#     TPFN = len(Same)
#     TNFP = len(Cross)
#     Recall_Pos, Recall_Neg = [], []
#     Precision_Pos, Precision_Neg = [], []
#     Correct = []
#     for th in Thresh:
#         TP = np.sum(Same > th)
#         FN = TPFN - TP
#         TN = np.sum(Cross < th)
#         FP = TNFP - TN
#
#         Recall_Pos.append(TP / TPFN)
#         Recall_Neg.append(TN / TNFP)
#         Precision_Pos.append(TP / (TP + FP))
#         Precision_Neg.append(TN / (TN + FN))
#         Correct.append((TN + TP) / (TPFN + TNFP))
#
#     fig, ax = plt.subplots()
#     ax.plot(Thresh, Correct, 'r', label='Correct: (TN+TP)/(TPFN+TNFP)')
#     ax.plot(Thresh, Recall_Pos, 'b', label='Recall_Pos: TP/TPFN')
#     ax.plot(Thresh, Recall_Neg, 'g', label='Recall_Neg: TN/TNFP')
#     ax.plot(Thresh, Precision_Pos, 'c', label='Precision_Pos: TP/(TP+FP)')
#     ax.plot(Thresh, Precision_Neg, 'm', label='Precision_Neg: TN/(TN+FN)')
#
#     ax.set_xlim([0, 1])
#     ax.set_ylim([0, 1])
#     ax.grid(True)
#     ax.set_title('PrecisePos & PreciseNeg')
#     ax.legend()
#     plt.show()
#
#     print("All done!!!")
#
#
# if __name__ == '__main__':
#
#     # Network Setup
#     if conf.testbackbone == 'resnet18':
#         # model = ResIRSE(conf.img_size, conf.embedding_size, conf.drop_ratio).to(conf.device)
#         model = resnet18().to(conf.device)
#     # elif conf.testbackbone == 'resnet34':
#     #     model = resnet34().to(conf.device)
#     # elif conf.testbackbone == 'resnet50':
#     #     model = resnet50().to(conf.device)
#     # elif conf.testbackbone == 'mobilevit_s':
#     #     model = mobilevit_s().to(conf.device)
#     # elif conf.testbackbone == 'mobilenetv3':
#     #     model = MobileNetV3_Small().to(conf.device)
#     # elif conf.testbackbone == 'mobilenet_v1':
#     #     model = mobilenet_v1().to(conf.device)
#     # elif conf.testbackbone == 'PPLCNET_x1_0':
#     #     model = PPLCNET_x1_0().to(conf.device)
#     # elif conf.testbackbone == 'PPLCNET_x0_5':
#     #     model = PPLCNET_x0_5().to(conf.device)
#     # elif conf.backbone == 'PPLCNET_x2_5':
#     #     model = PPLCNET_x2_5().to(conf.device)
#     # elif conf.testbackbone == 'mobilenet_v2':
#     #     model = mobilenet_v2().to(conf.device)
#     # elif conf.testbackbone == 'resnet14':
#     #     model = resnet14().to(conf.device)
#     else:
#         raise ValueError('Unsupported model {}'.format(conf.backbone))
#
#     print('load model {} '.format(conf.testbackbone))
#     # model = nn.DataParallel(model).to(conf.device)
#     model.load_state_dict(torch.load(conf.test_model, map_location=conf.device))
#     model.eval()
#     if not conf.group_test:
#         images = unique_image(conf.test_list)
#         images = [osp.join(conf.test_val, img) for img in images]
#
#         groups = group_image(images, conf.test_batch_size)  ## group images by batch_size
#
#         feature_dict = dict()
#         for group in groups:
#             d = featurize(group, conf.test_transform, model, conf.device)
#             feature_dict.update(d)
#         # print('feature_dict', feature_dict)
#         accuracy, threshold = compute_accuracy(feature_dict, conf.test_list, conf.test_val)
#
#         print(
#             f"Test Model: {conf.test_model}\n"
#             f"Accuracy: {accuracy:.3f}\n"
#             f"Threshold: {threshold:.3f}\n"
#         )
#     elif conf.group_test:
#         """
#         conf.test_val: path to the test dataset
#         conf.test_group_json: grouping config file for the test data
#         """
#         filename = conf.test_group_json
#
#         filename = "../cl/images_1.json"
#         with open(filename, 'r', encoding='utf-8') as file:
#             content_list_read = json.load(file)
#
#         compute_contrast_accuracy(content_list_read)

# =============================================================================
#     Similarity, Label = compute_group_accuracy(content_list_read)
#     print('allSimilarity >> {}'.format(Similarity))
#     print('allLabel >> {}'.format(Label))
#     compute_accuracy_recall(np.array(Similarity), np.array(Label))
#     # compute_group_accuracy(data_loaded)
#
# =============================================================================
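For orientation, this is roughly how the active FeatsInterface class above gets wired up. A hedged sketch, assuming the file's `utils.config` import resolves, the file itself is importable as `inference`, and the `resnet18` factory added later in this commit is importable as `resnet_pre` (all import paths are illustrative):

import numpy as np
import torch

from utils.config import config as cfg   # same config object the class reads
from inference import FeatsInterface      # this file; import path is illustrative
from resnet_pre import resnet18           # backbone added later in this commit; path illustrative

model = resnet18(pretrained=False).to(cfg.device)
model.eval()
encoder = FeatsInterface(resnetModel=model)

# A list of RGB uint8 crops -> an (N, embedding_size) feature matrix.
crops = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(3)]
with torch.no_grad():
    feats = encoder.inference(crops)
print(feats.shape)  # (3, cfg.embedding_size)

Passing a full frame as a single np.ndarray plus a detections array instead routes through inference_image, which crops each box, flips BGR to RGB, and returns the crops alongside their features.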
12
detecttracking/contrast/feat_extract/resnet_vit/.idea/contrastInference.iml
generated
Normal file
@@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$" />
    <orderEntry type="jdk" jdkName="Python 3.8 (my_env)" jdkType="Python SDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>
14
detecttracking/contrast/feat_extract/resnet_vit/.idea/deployment.xml
generated
Normal file
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="PublishConfigData" remoteFilesAllowedToDisappearOnAutoupload="false">
    <serverData>
      <paths name="lc@192.168.1.142:22 password">
        <serverdata>
          <mappings>
            <mapping local="$PROJECT_DIR$" web="/" />
          </mappings>
        </serverdata>
      </paths>
    </serverData>
  </component>
</project>
12
detecttracking/contrast/feat_extract/resnet_vit/.idea/inspectionProfiles/Project_Default.xml
generated
Normal file
@@ -0,0 +1,12 @@
<component name="InspectionProjectProfileManager">
  <profile version="1.0">
    <option name="myName" value="Project Default" />
    <inspection_tool class="PyPep8NamingInspection" enabled="true" level="WEAK WARNING" enabled_by_default="true">
      <option name="ignoredErrors">
        <list>
          <option value="N803" />
        </list>
      </option>
    </inspection_tool>
  </profile>
</component>
6
detecttracking/contrast/feat_extract/resnet_vit/.idea/inspectionProfiles/profiles_settings.xml
generated
Normal file
@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>
7
detecttracking/contrast/feat_extract/resnet_vit/.idea/misc.xml
generated
Normal file
@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="Black">
    <option name="sdkName" value="Python 3.8 (my_env)" />
  </component>
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (my_env)" project-jdk-type="Python SDK" />
</project>
8
detecttracking/contrast/feat_extract/resnet_vit/.idea/modules.xml
generated
Normal file
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/contrastInference.iml" filepath="$PROJECT_DIR$/.idea/contrastInference.iml" />
    </modules>
  </component>
</project>
@@ -0,0 +1 @@
# from .config import config
84
detecttracking/contrast/feat_extract/resnet_vit/config.py
Normal file
@@ -0,0 +1,84 @@
import torch
import torchvision.transforms as T


class Config:
    # network settings
    backbone = 'vit'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5, PPLCNET_x2_5]
    metric = 'softmax'  # [cosface, arcface, softmax]
    cbam = True
    embedding_size = 256  # 256
    drop_ratio = 0.5
    img_size = 224

    teacher = 'vit'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5, PPLCNET_x2_5]
    student = 'resnet'
    # data preprocess
    # input_shape = [1, 128, 128]
    """transforms.RandomCrop(size),
    transforms.RandomVerticalFlip(p=0.5),
    transforms.RandomHorizontalFlip(),
    RandomRotate(15, 0.3),
    # RandomGaussianBlur()"""

    train_transform = T.Compose([
        T.ToTensor(),
        T.Resize((img_size, img_size)),
        # T.RandomCrop(img_size*4//5),
        # T.RandomHorizontalFlip(p=0.5),
        T.RandomRotation(180),
        T.ColorJitter(brightness=0.5),
        T.ConvertImageDtype(torch.float32),
        T.Normalize(mean=[0.5], std=[0.5]),
    ])
    test_transform = T.Compose([
        T.ToTensor(),
        T.Resize((img_size, img_size)),
        T.ConvertImageDtype(torch.float32),
        T.Normalize(mean=[0.5], std=[0.5]),
    ])

    # dataset
    train_root = './data/2250_train/train'  # dataset that went through one initial screening pass
    # train_root = './data/0625_train/train'
    test_root = "./data/2250_train/val/"
    # test_root = "./data/0625_train/val"

    test_list = "./data/2250_train/val_pair.txt"
    test_group_json = "./data/2250_train/cross_same.json"
    # test_group_json = "./data/0625_train/cross_same.json"
    # test_list = "./data/test_data_100/val_pair.txt"

    # training settings
    checkpoints = "checkpoints/vit_b_16_0815/"  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3]
    restore = True
    # restore_model = "checkpoints/renet18_2250_0315/best_resnet18_2250_0315.pth"  # best_resnet18_1491_0306.pth
    restore_model = "checkpoints/vit_b_16_0730/best.pth"  # best_resnet18_1491_0306.pth

    # test_model = "./checkpoints/renet18_1887_0311/best_resnet18_1887_0311.pth"
    testbackbone = 'resnet18'  # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5]
    # test_val = "./data/2250_train"
    test_val = "./data/0625_train"
    test_model = "checkpoints/resnet18_0721/best.pth"

    train_batch_size = 128  # 256
    test_batch_size = 256  # 256


    epoch = 300
    optimizer = 'adamw'  # ['sgd', 'adam', 'adamw']
    lr = 1e-3  # 1e-2
    lr_step = 10  # 10
    lr_decay = 0.95  # 0.98
    weight_decay = 5e-4
    loss = 'focal_loss'  # ['focal_loss', 'cross_entropy']
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    # device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    pin_memory = True  # if memory is large, set it True to speed up a bit
    num_workers = 4  # dataloader

    group_test = True
    # group_test = False

config = Config()
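With `restore = True` above, a training run is expected to warm-start from `restore_model` before continuing. A minimal sketch of that resume step, assuming this config.py is importable and the caller supplies the constructed backbone (the helper name is illustrative):

import torch
from config import config as conf  # the singleton defined above

def resume_if_requested(model):
    # Warm-start from conf.restore_model when conf.restore is set,
    # mapping tensors onto the configured device.
    if conf.restore:
        state = torch.load(conf.restore_model, map_location=conf.device)
        model.load_state_dict(state)
    return model.to(conf.device)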
103
detecttracking/contrast/feat_extract/resnet_vit/inference.py
Normal file
@@ -0,0 +1,103 @@
import os
import os.path as osp

import torch

import numpy as np
from model import resnet18
from PIL import Image

from torch.nn.functional import softmax
from config import config as conf
import time

embedding_size = conf.embedding_size
img_size = conf.img_size
device = conf.device

def load_contrast_model():
    model = resnet18().to(conf.device)
    model.load_state_dict(torch.load(conf.test_model, map_location=conf.device))
    model.eval()
    print('load model {} '.format(conf.testbackbone))

    return model


def group_image(imageDirs, batch) -> list:
    """Group image paths by batch size"""
    images = []
    with os.scandir(imageDirs) as entries:
        for imgpth in entries:
            print(imgpth)
            images.append(os.sep.join([imageDirs, imgpth.name]))
    print(f"{len(images)} images in {imageDirs}")
    size = len(images)
    res = []
    for i in range(0, size, batch):
        end = min(batch + i, size)
        res.append(images[i: end])
    return res

def test_preprocess(images: list, transform) -> torch.Tensor:
    res = []
    for img in images:
        # print(img)
        im = Image.open(img)
        im = transform(im)
        res.append(im)
    # data = torch.cat(res, dim=0)  # shape: (batch, 128, 128)
    # data = data[:, None, :, :]  # shape: (batch, 1, 128, 128)
    data = torch.stack(res)
    return data

def featurize(images: list, transform, net, device) -> dict:
    """featurize each image and save into a dictionary
    Args:
        images: image paths
        transform: test transform
        net: pretrained model
        device: cpu or cuda
    Returns:
        Dict (key: imagePath, value: feature)
    """
    data = test_preprocess(images, transform)
    data = data.to(device)
    net = net.to(device)
    with torch.no_grad():
        features = net(data)
    # res = {img: feature for (img, feature) in zip(images, features)}
    return features



if __name__ == '__main__':
    # Network Setup
    if conf.testbackbone == 'resnet18':
        model = resnet18().to(device)
    else:
        raise ValueError('Unsupported model {}'.format(conf.backbone))

    print('load model {} '.format(conf.testbackbone))
    # model = nn.DataParallel(model).to(conf.device)
    model.load_state_dict(torch.load(conf.test_model, map_location=conf.device))
    model.eval()

    # images = unique_image(conf.test_list)
    # images = [osp.join(conf.test_val, img) for img in images]
    # print('images', images)
    # images = ['./data/2250_train/val/6920616313186/6920616313186_6920616313186_20240220-124502_53d2e103-ae3a-4689-b745-9d8723b770fe_front_returnGood_70f75407b7ae_31_01.jpg']


    # groups = group_image(conf.test_val, conf.test_batch_size)  ## group images by batch_size
    groups = group_image('img_test', 1)  ## group images by batch_size; default batch_size = 8

    feature_dict = dict()
    for group in groups:
        s = time.time()
        features = featurize(group, conf.test_transform, model, conf.device)
        e = time.time()
        print('time: {}'.format(e - s))
        # out = softmax(features, dim=1).argmax(dim=1)
        # print('d >>> {}'. format(out))
        # feature_dict.update(d)
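featurize above returns raw embedding rows; pair scoring elsewhere in this commit uses cosine similarity (see the commented-out cosin_metric in feat_extract/inference.py). A small sketch of that scoring step, assuming `features` holds at least two rows returned by featurize:

import numpy as np

def cosine_similarity(x1, x2):
    # Same formula as the commented-out cosin_metric: dot / (||x1|| * ||x2||).
    return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))

f = features.cpu().numpy()            # features returned by featurize(...)
print(cosine_similarity(f[0], f[1]))  # similarity of the first two images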
@@ -0,0 +1 @@
from .resnet_pre import resnet18, resnet34, resnet50, resnet14
@@ -0,0 +1,462 @@
import torch
import torch.nn as nn
from config import config as conf

try:
    from torch.hub import load_state_dict_from_url
except ImportError:
    from torch.utils.model_zoo import load_url as load_state_dict_from_url
# from .utils import load_state_dict_from_url

__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
           'wide_resnet50_2', 'wide_resnet101_2']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)

def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)


class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()

        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1

        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv1(x)
        return self.sigmoid(x)

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cam=False, bam=False):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        self.cam = cam
        self.bam = bam
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride
        if self.cam:
            if planes == 64:
                self.globalAvgPool = nn.AvgPool2d(56, stride=1)
            elif planes == 128:
                self.globalAvgPool = nn.AvgPool2d(28, stride=1)
            elif planes == 256:
                self.globalAvgPool = nn.AvgPool2d(14, stride=1)
            elif planes == 512:
                self.globalAvgPool = nn.AvgPool2d(7, stride=1)

            self.fc1 = nn.Linear(in_features=planes, out_features=round(planes / 16))
            self.fc2 = nn.Linear(in_features=round(planes / 16), out_features=planes)
            self.sigmod = nn.Sigmoid()
        if self.bam:
            self.bam = SpatialAttention()

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        if self.cam:
            # channel attention: keep the full map, pool a copy, squeeze/excite
            # through the two FC layers, then rescale the full map channel-wise
            ori_out = out
            out = self.globalAvgPool(out)
            out = out.view(out.size(0), -1)
            out = self.fc1(out)
            out = self.relu(out)
            out = self.fc2(out)
            out = self.sigmod(out)
            out = out.view(out.size(0), out.size(-1), 1, 1)
            out = out * ori_out

        if self.bam:
            out = out * self.bam(out)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None, cam=False, bam=False):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        self.cam = cam
        self.bam = bam
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        if self.cam:
            if planes == 64:
                self.globalAvgPool = nn.AvgPool2d(56, stride=1)
            elif planes == 128:
                self.globalAvgPool = nn.AvgPool2d(28, stride=1)
            elif planes == 256:
                self.globalAvgPool = nn.AvgPool2d(14, stride=1)
            elif planes == 512:
                self.globalAvgPool = nn.AvgPool2d(7, stride=1)

            self.fc1 = nn.Linear(planes * self.expansion, round(planes / 4))
            self.fc2 = nn.Linear(round(planes / 4), planes * self.expansion)
            self.sigmod = nn.Sigmoid()

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        if self.cam:
            # channel attention, same squeeze/excite scheme as in BasicBlock
            ori_out = out
            out = self.globalAvgPool(out)
            out = out.view(out.size(0), -1)
            out = self.fc1(out)
            out = self.relu(out)
            out = self.fc2(out)
            out = self.sigmod(out)
            out = out.view(out.size(0), out.size(-1), 1, 1)
            out = out * ori_out
        out += identity
        out = self.relu(out)
        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=conf.embedding_size, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None, scale=0.75):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, int(64 * scale), layers[0])
        self.layer2 = self._make_layer(block, int(128 * scale), layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, int(256 * scale), layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, int(512 * scale), layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(int(512 * block.expansion * scale), num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # print('poolBefore', x.shape)
        x = self.avgpool(x)
        # print('poolAfter', x.shape)
        x = torch.flatten(x, 1)
        # print('fcBefore', x.shape)
        x = self.fc(x)

        # print('fcAfter', x.shape)

        return x

    def forward(self, x):
        return self._forward_impl(x)


# def _resnet(arch, block, layers, pretrained, progress, **kwargs):
#     model = ResNet(block, layers, **kwargs)
#     if pretrained:
#         state_dict = load_state_dict_from_url(model_urls[arch],
#                                               progress=progress)
#         model.load_state_dict(state_dict, strict=False)
#     return model
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)

        src_state_dict = state_dict
        target_state_dict = model.state_dict()
        skip_keys = []
        # skip mismatched-size tensors in case of pretraining
        for k in src_state_dict.keys():
            if k not in target_state_dict:
                continue
            if src_state_dict[k].size() != target_state_dict[k].size():
                skip_keys.append(k)
        for k in skip_keys:
            del src_state_dict[k]
        missing_keys, unexpected_keys = model.load_state_dict(src_state_dict, strict=False)

    return model


def resnet14(pretrained=True, progress=True, **kwargs):
    r"""ResNet-14 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 1, 1, 2], pretrained, progress,
                   **kwargs)


def resnet18(pretrained=True, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)


def resnet34(pretrained=False, progress=True, **kwargs):
    r"""ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


def resnet50(pretrained=False, progress=True, **kwargs):
    r"""ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
                   **kwargs)


def resnet101(pretrained=False, progress=True, **kwargs):
    r"""ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
                   **kwargs)


def resnet152(pretrained=False, progress=True, **kwargs):
    r"""ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
                   **kwargs)


def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)


def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    r"""ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)


def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
                   pretrained, progress, **kwargs)


def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels
    which is twice larger in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
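One behavioural note on the file above: every stage width is multiplied by `scale` (default 0.75) and the head becomes `fc: int(512 * expansion * scale) -> conf.embedding_size`, so most ImageNet tensors no longer match and `_resnet` silently skips the mismatched keys when `pretrained=True`. A quick shape check, run offline with `pretrained=False` to avoid the download (the import path is illustrative; `conf.embedding_size` is read at import time):

import torch
from resnet_pre import resnet18  # this file; requires its `config` import to resolve

model = resnet18(pretrained=False)
model.eval()
with torch.no_grad():
    out = model(torch.randn(2, 3, 224, 224))
print(out.shape)  # torch.Size([2, 256]) with the default conf.embedding_size = 256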