This commit is contained in:
王庆刚
2024-11-25 18:05:08 +08:00
parent c47894ddc0
commit 8bbee310ba
109 changed files with 1003 additions and 305 deletions

contrast/feat_analysisi.py Normal file
View File

@@ -0,0 +1,160 @@
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 11:17:29 2024
@author: ym
"""
import os
import cv2
import pickle
import numpy as np
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
def save_imgpairs(barcode, imgpaths, matrix, savepath, thresh=(0.4, 0.6), ctype="intra"):
if ctype=="intra":
rows, cols = np.triu_indices(matrix.shape[0], k=1) # k=1 excludes the diagonal
mask = matrix[rows, cols] < thresh[1]
indices = list(zip(rows[mask], cols[mask]))
else:
rows, cols = np.where(matrix > thresh[0])
indices = list(zip(rows, cols))
if len(indices):
savepath = os.path.join(savepath, barcode)
if not os.path.exists(savepath):
os.makedirs(savepath)
for idx1, idx2 in indices:
if len(imgpaths) == 1:
img1 = cv2.imread(imgpaths[0][idx1])
img2 = cv2.imread(imgpaths[0][idx2])
elif len(imgpaths) == 2:
img1 = cv2.imread(imgpaths[0][idx1])
img2 = cv2.imread(imgpaths[1][idx2])
simi = matrix[idx1, idx2]
H1, W1 = img1.shape[:2]
H2, W2 = img2.shape[:2]
H, W = max((H1, H2)), max((W1, W2))
img = (np.ones((H, 2*W, 3)) * np.array([255, 128, 128])).astype(np.uint8)  # keep uint8 so the cv2 calls below accept it
img[0:H1, 0:W1, :] = img1
img[0:H2, (2*W-W2):, :] = img2
text = f"sim: {simi:.2f}"
org = (10, H-10)
cv2.putText(img, text, org, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.75,
color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)
imgpath = os.path.join(savepath, f"{simi:.2f}_{barcode}_{idx1}_{idx2}.png")
cv2.imwrite(imgpath, img)
def feat_analysis(featpath):
savepath = r"D:\exhibition\result\stdfeat"
InterThresh = (0.4, 0.6)
featDict, features= [], []
for filename in os.listdir(featpath):
file, ext = os.path.splitext(filename)
if ext != ".pickle": continue
filepath = os.path.join(featpath, filename)
with open(filepath, 'rb') as f:
bpDict = pickle.load(f)
feat = bpDict["feats_ft32"]
featDict.append(bpDict)
features.append(feat)
N = len(features)
simMatrix = []
intra_simi = np.empty(0)
low_simi_index = {}
for i, feats in enumerate(features):
matrix = 1 - cdist(feats, feats, 'cosine')
simMatrix.append(matrix)
'''Extract the upper-triangular elements of the similarity matrix'''
rows, cols = np.triu_indices(matrix.shape[0], k=1) # k=1 excludes the diagonal
upper_tri= matrix[rows, cols]
intra_simi = np.concatenate((intra_simi, upper_tri))
'''Save image pairs whose similarity falls below the threshold'''
barcode = featDict[i]["barcode"]
imgpaths = featDict[i]["imgpaths"]
# save_imgpairs(barcode, [imgpaths], matrix, savepath, InterThresh, "intra")
print(f"{barcode} have done!")
Matrix = np.zeros((N, N))
inter_bcds = []
inter_simi = np.empty(0)
for i, feati in enumerate(features):
bcdi = featDict[i]["barcode"]
imgpathi = featDict[i]["imgpaths"]
for j, featj in enumerate(features):
bcdj = featDict[j]["barcode"]
imgpathj = featDict[j]["imgpaths"]
matrix = 1 - cdist(feati, featj, 'cosine')
inter_bcds.append((i, j, bcdi, bcdj))
Matrix[i, j] = np.mean(matrix)
if j>i:
bcd_ij = bcdi+'_'+bcdj
# save_imgpairs(bcd_ij, [imgpathi, imgpathj], matrix, savepath, InterThresh, "inter")
inter_simi = np.concatenate((inter_simi, matrix.ravel()))
print(f"{bcd_ij} have done!")
fig, axs = plt.subplots(2, 1)
axs[0].hist(intra_simi, bins=100, color='blue', edgecolor='black', alpha=0.7)
axs[0].set_xlim(0, 1)
axs[0].set_xlabel('Similarity')
axs[0].set_title("intra similarity")
axs[1].hist(inter_simi, bins=100, color='green', edgecolor='black', alpha=0.7)
axs[1].set_xlim(0, 1)
axs[1].set_xlabel('Similarity')
axs[1].set_title("inter similarity")
print("Done")
def main():
stdpath = r"D:\exhibition\dataset\feats"
feat_analysis(stdpath)
if __name__ == '__main__':
main()
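For reference, the core of feat_analysis above reduces to two NumPy/SciPy idioms: 1 - cdist(feats, feats, 'cosine') turns L2-normalized features into a cosine-similarity matrix, and np.triu_indices(n, k=1) selects each unordered pair exactly once. A minimal, self-contained sketch with toy data (illustrative only, not part of this commit):

import numpy as np
from scipy.spatial.distance import cdist

feats = np.random.rand(5, 256)                         # toy stand-in for feats_ft32
feats /= np.linalg.norm(feats, axis=1, keepdims=True)  # L2-normalize each row
matrix = 1 - cdist(feats, feats, 'cosine')             # (5, 5) similarity matrix
rows, cols = np.triu_indices(matrix.shape[0], k=1)     # k=1 excludes the diagonal
intra = matrix[rows, cols]                             # the 10 unique pair scores
print(intra.min(), intra.max())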

View File

@@ -10,6 +10,8 @@ class Config:
embedding_size = 256
drop_ratio = 0.5
img_size = 224
batch_size = 8
# data preprocess
# input_shape = [1, 128, 128]
@@ -58,7 +60,11 @@ class Config:
testbackbone = 'resnet18' # [resnet18, mobilevit_s, mobilenet_v2, mobilenetv3_small, mobilenetv3_large, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5]
test_val = "D:/比对/cl"
# test_val = "./data/test_data_100"
test_model = "checkpoints/resnet18_0515/best.pth"
# test_model = "checkpoints/best_resnet18_v11.pth"
test_model = "checkpoints/zhanting_cls22_v11.pth"
train_batch_size = 512 # 256
test_batch_size = 256 # 256
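For context, the FeatsInterface class introduced below reads device, test_transform, batch_size, embedding_size, test_model and testbackbone from this config object. A minimal stand-in showing only that expected shape (placeholder values, not the project's real Config):

import torch
from torchvision import transforms

class Config:
    # placeholders mirroring just the fields FeatsInterface consumes
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    embedding_size = 256
    img_size = 224
    batch_size = 8
    testbackbone = 'resnet18'
    test_model = "checkpoints/zhanting_cls22_v11.pth"
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((img_size, img_size)),
    ])

config = Config()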

View File

@@ -3,30 +3,140 @@
@author: LiChen
"""
# import pdb
# import shutil
import torch.nn as nn
# import statistics
import os
import os.path as osp
import pdb
import numpy as np
import shutil
from scipy.spatial.distance import cdist
import torch
import torch.nn as nn
import os.path as osp
from PIL import Image
import json
from config import config as conf
from model import resnet18
import matplotlib.pyplot as plt
from pathlib import Path
# import sys
# sys.path.append(r"D:\DetectTracking")
# from contrast.config import config as conf
# from contrast.model import resnet18
from .config import config as conf
from .model import resnet18
# from model import (mobilevit_s, resnet14, resnet18, resnet34, resnet50, mobilenet_v2,
# MobileNetV3_Small, mobilenet_v1, PPLCNET_x1_0, PPLCNET_x0_5, PPLCNET_x2_5)
curpath = Path(__file__).resolve().parents[0]
class FeatsInterface:
def __init__(self, conf):
self.device = conf.device
# if conf.backbone == 'resnet18':
# model = resnet18().to(conf.device)
model = resnet18().to(conf.device)
self.transform = conf.test_transform
self.batch_size = conf.batch_size
self.embedding_size = conf.embedding_size
if conf.test_model.find("zhanting") == -1:
model = nn.DataParallel(model).to(conf.device)
self.model = model
modpath = os.path.join(curpath, conf.test_model)
self.model.load_state_dict(torch.load(modpath, map_location=conf.device))
self.model.eval()
print('load model {} '.format(conf.testbackbone))
def inference(self, images, detections=None):
'''
Input images in BGR order must be converted to RGB first.
'''
if isinstance(images, np.ndarray):
imgs, features = self.inference_image(images, detections)
return imgs, features
batch_patches = []
patches = []
for i, img in enumerate(images):
img = img.copy()
patch = self.transform(img)
if str(self.device) != "cpu":
patch = patch.to(device=self.device).half()
else:
patch = patch.to(device=self.device)
patches.append(patch)
if (i + 1) % self.batch_size == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
features = np.zeros((0, self.embedding_size))
for patches in batch_patches:
pred=self.model(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
return features
def inference_image(self, image, detections):
H, W, _ = np.shape(image)
batch_patches = []
patches = []
imgs = []
for d in range(np.size(detections, 0)):
tlbr = detections[d, :4].astype(np.int_)
tlbr[0] = max(0, tlbr[0])
tlbr[1] = max(0, tlbr[1])
tlbr[2] = min(W - 1, tlbr[2])
tlbr[3] = min(H - 1, tlbr[3])
img = image[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2], :]
imgs.append(img)
img1 = img[:, :, ::-1].copy() # the model expects RGB inputs
patch = self.transform(img1)
# patch = patch.to(device=self.device).half()
if str(self.device) != "cpu":
patch = patch.to(device=self.device).half()
else:
patch = patch.to(device=self.device)
patches.append(patch)
if (d + 1) % self.batch_size == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
features = np.zeros((0, self.embedding_size))
for patches in batch_patches:
pred = self.model(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
return imgs, features
import matplotlib.pyplot as plt
import statistics
embedding_size = conf.embedding_size
img_size = conf.img_size
device = conf.device
def unique_image(pair_list) -> set:
@@ -102,38 +212,38 @@ def featurize(images: list, transform, net, device, train=False) -> dict:
res = {img: feature for (img, feature) in zip(images, features)}
return res
def inference_image(images: list, transform, net, device, bs=16, embedding_size=256) -> dict:
batch_patches = []
patches = []
for d, img in enumerate(images):
img = Image.open(img)
patch = transform(img)
# def inference_image(images: list, transform, net, device, bs=16, embedding_size=256) -> dict:
# batch_patches = []
# patches = []
# for d, img in enumerate(images):
# img = Image.open(img)
# patch = transform(img)
if str(device) != "cpu":
patch = patch.to(device).half()
else:
patch = patch.to(device)
# if str(device) != "cpu":
# patch = patch.to(device).half()
# else:
# patch = patch.to(device)
patches.append(patch)
if (d + 1) % bs == 0:
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
patches = []
# patches.append(patch)
# if (d + 1) % bs == 0:
# patches = torch.stack(patches, dim=0)
# batch_patches.append(patches)
# patches = []
if len(patches):
patches = torch.stack(patches, dim=0)
batch_patches.append(patches)
# if len(patches):
# patches = torch.stack(patches, dim=0)
# batch_patches.append(patches)
features = np.zeros((0, embedding_size), dtype=np.float32)
for patches in batch_patches:
pred = net(patches)
pred[torch.isinf(pred)] = 1.0
feat = pred.cpu().data.numpy()
features = np.vstack((features, feat))
# features = np.zeros((0, embedding_size), dtype=np.float32)
# for patches in batch_patches:
# pred = net(patches)
# pred[torch.isinf(pred)] = 1.0
# feat = pred.cpu().data.numpy()
# features = np.vstack((features, feat))
return features
# return features
@@ -283,6 +393,7 @@ def compute_contrast_accuracy(content_list_read):
npairs = min((len(same_folder_pairs), len(cross_folder_pairs)))
Encoder = FeatsInterface(conf)
same_pairs = same_folder_pairs[:npairs]
cross_pairs = cross_folder_pairs[:npairs]
@@ -292,8 +403,8 @@ def compute_contrast_accuracy(content_list_read):
images_a = [osp.join(conf.test_val, img) for img in same_pairs[i][0]]
images_b = [osp.join(conf.test_val, img) for img in same_pairs[i][1]]
feats_a = inference_image(images_a, conf.test_transform, model, conf.device)
feats_b = inference_image(images_b, conf.test_transform, model, conf.device)
feats_a = Encoder.inference(images_a)
feats_b = Encoder.inference(images_b)
# matrix = 1- np.maximum(0.0, cdist(feats_a, feats_b, 'cosine'))
matrix = 1 - cdist(feats_a, feats_b, 'cosine')
@@ -324,8 +435,8 @@ def compute_contrast_accuracy(content_list_read):
images_a = [osp.join(conf.test_val, img) for img in cross_pairs[i][0]]
images_b = [osp.join(conf.test_val, img) for img in cross_pairs[i][1]]
feats_a = inference_image(images_a, conf.test_transform, model, conf.device)
feats_b = inference_image(images_b, conf.test_transform, model, conf.device)
feats_a = Encoder.inference(images_a)
feats_b = Encoder.inference(images_b)
# matrix = 1- np.maximum(0.0, cdist(feats_a, feats_b, 'cosine'))
matrix = 1 - cdist(feats_a, feats_b, 'cosine')
@@ -407,28 +518,28 @@ if __name__ == '__main__':
# Network Setup
if conf.testbackbone == 'resnet18':
# model = ResIRSE(img_size, embedding_size, conf.drop_ratio).to(device)
model = resnet18().to(device)
# model = ResIRSE(conf.img_size, conf.embedding_size, conf.drop_ratio).to(conf.device)
model = resnet18().to(conf.device)
# elif conf.testbackbone == 'resnet34':
# model = resnet34().to(device)
# model = resnet34().to(conf.device)
# elif conf.testbackbone == 'resnet50':
# model = resnet50().to(device)
# model = resnet50().to(conf.device)
# elif conf.testbackbone == 'mobilevit_s':
# model = mobilevit_s().to(device)
# model = mobilevit_s().to(conf.device)
# elif conf.testbackbone == 'mobilenetv3':
# model = MobileNetV3_Small().to(device)
# model = MobileNetV3_Small().to(conf.device)
# elif conf.testbackbone == 'mobilenet_v1':
# model = mobilenet_v1().to(device)
# model = mobilenet_v1().to(conf.device)
# elif conf.testbackbone == 'PPLCNET_x1_0':
# model = PPLCNET_x1_0().to(device)
# model = PPLCNET_x1_0().to(conf.device)
# elif conf.testbackbone == 'PPLCNET_x0_5':
# model = PPLCNET_x0_5().to(device)
# model = PPLCNET_x0_5().to(conf.device)
# elif conf.backbone == 'PPLCNET_x2_5':
# model = PPLCNET_x2_5().to(device)
# model = PPLCNET_x2_5().to(conf.device)
# elif conf.testbackbone == 'mobilenet_v2':
# model = mobilenet_v2().to(device)
# model = mobilenet_v2().to(conf.device)
# elif conf.testbackbone == 'resnet14':
# model = resnet14().to(device)
# model = resnet14().to(conf.device)
else:
raise ValueError('Unsupported testbackbone: {}'.format(conf.testbackbone))
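Taken together, FeatsInterface wraps model loading and batched feature extraction behind a single inference entry point. A hedged usage sketch (import paths follow the feat_extract.* form used elsewhere in this commit; the dummy inputs are illustrative):

import numpy as np
from PIL import Image
from feat_extract.config import config as conf
from feat_extract.inference import FeatsInterface

encoder = FeatsInterface(conf)

# A list of PIL images yields an (N, embedding_size) feature matrix.
imgs = [Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8)) for _ in range(2)]
feats = encoder.inference(imgs)

# An ndarray frame plus (M, 5) detections [x1, y1, x2, y2, score]
# yields the cropped patches and their features.
frame = np.zeros((480, 640, 3), dtype=np.uint8)
dets = np.array([[10, 20, 110, 220, 0.9]])
crops, feats = encoder.inference(frame, dets)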

View File

@@ -1,6 +1,6 @@
from torch import nn
from .utils import load_state_dict_from_url
from config import config as conf
from ..config import config as conf
__all__ = ['MobileNetV2', 'mobilenet_v2']

View File

@@ -7,7 +7,7 @@ import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from config import config as conf
from ..config import config as conf
class hswish(nn.Module):

View File

@@ -2,7 +2,10 @@ import torch
import torch.nn as nn
from einops import rearrange
from config import config as conf
# import sys
# sys.path.append(r"D:\DetectTracking")
from ..config import config as conf
def conv_1x1_bn(inp, oup):

View File

@@ -1,7 +1,10 @@
from model.CBAM import CBAM
import torch
import torch.nn as nn
from model.Tool import GeM as gem
from .CBAM import CBAM
from .Tool import GeM as gem
# from model.CBAM import CBAM
# from model.Tool import GeM as gem
class Bottleneck(nn.Module):

View File

@@ -1,6 +1,6 @@
import torch
import torch.nn as nn
from config import config as conf
from ..config import config as conf
try:
from torch.hub import load_state_dict_from_url
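These import rewrites (from config import ... to from ..config import ..., and from model.CBAM import ... to from .CBAM import ...) make the backbone files resolve config and their sibling modules relative to their own package rather than whatever happens to sit on sys.path. The layout implied by the import paths in this commit, roughly (directory names inferred, not shown in the diff):

# feat_extract/                 package root implied by the imports above
#     config.py                 <- resolved by "from ..config import config"
#     inference.py              defines FeatsInterface
#     model/
#         CBAM.py, Tool.py      <- resolved by "from .CBAM ...", "from .Tool ..."
#         mobilenet_v2.py, mobilenetv3.py, mobilevit.py, resnet_pre.py
#
# Relative imports only resolve when modules run as part of the package:
#     python -m feat_extract.inference     works
#     python feat_extract/inference.py     ImportError: attempted relative import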

View File

@@ -6,26 +6,33 @@ Created on Sun Nov 3 12:05:19 2024
"""
import os
import time
import torch
# import torch
import pickle
# import json
import numpy as np
from config import config as conf
from model import resnet18 as resnet18
from feat_inference import inference_image
from PIL import Image
from feat_extract.config import config as conf
# from model import resnet18 as resnet18
from feat_extract.inference import FeatsInterface #, inference_image
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
'''======= 0. Configure the feature-extraction model path ======='''
model_path = conf.test_model
model_path = r"D:\exhibition\ckpt\zhanting.pth"
# def model_init(conf, mpath=None):
# '''======= 0. Configure the feature-extraction model path ======='''
# if mpath is None:
# model_path = conf.test_model
# else:
# model_path = mpath
##============ load resnet model
model = resnet18().to(conf.device)
# model = nn.DataParallel(model).to(conf.device)
model.load_state_dict(torch.load(model_path, map_location=conf.device))
model.eval()
print('load model {} '.format(conf.testbackbone))
# ##============ load resnet model
# model = resnet18().to(conf.device)
# # model = nn.DataParallel(model).to(conf.device)
# model.load_state_dict(torch.load(model_path, map_location=conf.device))
# model.eval()
# print('load model {} '.format(conf.testbackbone))
# return model
def get_std_barcodeDict(bcdpath, savepath, bcdSet):
'''
@@ -42,9 +49,9 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
'''Read the list of barcodes in the dataset'''
stdBarcodeList = []
for filename in os.listdir(bcdpath):
# filepath = os.path.join(bcdpath, filename)
# if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8:
# continue
filepath = os.path.join(bcdpath, filename)
if not os.path.isdir(filepath) or not filename.isdigit() or len(filename)<8:
continue
if bcdSet is None:
stdBarcodeList.append(filename)
elif filename in bcdSet:
@@ -59,7 +66,7 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
for barcode, bpath in bcdPaths:
pickpath = os.path.join(savepath, f"{barcode}.pickle")
if os.path.isfile(pickpath):
continue
continue
stdBarcodeDict = {}
stdBarcodeDict[barcode] = []
@@ -89,6 +96,7 @@ def get_std_barcodeDict(bcdpath, savepath, bcdSet):
pickpath = os.path.join(savepath, f"{barcode}.pickle")
with open(pickpath, 'wb') as f:
pickle.dump(stdBarcodeDict, f)
print(f"Barcode: {barcode}")
# k += 1
@@ -115,32 +123,37 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
stdBarcodeDict = {}
stdBarcodeDict_ft16 = {}
Encoder = FeatsInterface(conf)
'''The same name appears in 4 places: (1) the barcode's original image folder; (2) the .pickle file name in imgPath and the dict key inside that pickle'''
'''The same name appears in 4 places: (1) the barcode's original image folder; (2) the .pickle file name in imgPath;
(3) the dict key inside that pickle file; (4) a key in the feature-vector dict'''
k = 0
for filename in os.listdir(imgPath):
bcd, ext = os.path.splitext(filename)
pkpath = os.path.join(featPath, f"{bcd}.pickle")
if os.path.isfile(pkpath): continue
filepath = os.path.join(imgPath, filename)
if ext != ".pickle": continue
if bcdSet is not None and bcd not in bcdSet:
continue
filepath = os.path.join(imgPath, filename)
featpath = os.path.join(featPath, f"{bcd}.pickle")
stdbDict = {}
stdbDict_ft16 = {}
stdbDict_uint8 = {}
t1 = time.time()
try:
with open(filepath, 'rb') as f:
bpDict = pickle.load(f)
bpDict = pickle.load(f)
for barcode, imgpaths in bpDict.items():
# feature = batch_inference(imgpaths, 8)  # from LiChen's distilled ViT model
feature = inference_image(imgpaths, conf.test_transform, model, conf.device)
# feature = inference_image(imgpaths, conf.test_transform, model, conf.device)
imgs = []
for d, imgpath in enumerate(imgpaths):
img = Image.open(imgpath)
imgs.append(img)
feature = Encoder.inference(imgs)
feature /= np.linalg.norm(feature, axis=1)[:, None]
# float16
@@ -162,7 +175,7 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
stdbDict["feats_ft16"] = feature_ft16
stdbDict["feats_uint8"] = feature_uint8
with open(pkpath, 'wb') as f:
with open(featpath, 'wb') as f:
pickle.dump(stdbDict, f)
stdBarcodeDict[barcode] = feature
@@ -174,21 +187,10 @@ def stdfeat_infer(imgPath, featPath, bcdSet=None):
# if k == 10:
# break
##================== float32
# pickpath = os.path.join(featPath, f"barcode_features_{k}.pickle")
# with open(pickpath, 'wb') as f:
# pickle.dump(stdBarcodeDict, f)
##================== float16
# pickpath_ft16 = os.path.join(featPath, f"barcode_features_ft16_{k}.pickle")
# with open(pickpath_ft16, 'wb') as f:
# pickle.dump(stdBarcodeDict_ft16, f)
return
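stdfeat_infer stores each barcode's features at three precisions (feats_ft32, feats_ft16, feats_uint8). The float16 copy is a plain cast of the L2-normalized float32 matrix; the uint8 mapping itself is elided by the hunk above, so the sketch below shows one plausible scheme only (an assumption, not this commit's code):

import numpy as np

feature = np.random.rand(4, 256).astype(np.float32)  # toy float32 features
feature /= np.linalg.norm(feature, axis=1)[:, None]  # L2-normalize, as above

feature_ft16 = feature.astype(np.float16)            # feats_ft16: plain cast
# Hypothetical uint8 quantization: map [-1, 1] onto [0, 255].
feature_uint8 = np.round((feature + 1.0) * 127.5).astype(np.uint8)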
def genfeatures(imgpath, bcdpath, featpath, bcdSet=None):
def gen_bcd_features(imgpath, bcdpath, featpath, bcdSet=None):
''' Generate the standard feature set '''
'''1. Collect the sample paths under imgpath into a dict {barcode: [imgpath1, imgpath2, ...]}
and store it in bcdpath, one barcode.pickle file per barcode'''
@@ -198,11 +200,12 @@ def genfeatures(imgpath, bcdpath, featpath, bcdSet=None):
stdfeat_infer(bcdpath, featpath, bcdSet)
def main():
imgpath = r"\\192.168.1.28\share\展厅barcode数据\整理\zhantingBase"
imgpath = r"\\192.168.1.28\share\数据\已完成数据\展厅数据\v1.0\比对数据\整理\zhantingBase"
bcdpath = r"D:\exhibition\dataset\bcdpath"
featpath = r"D:\exhibition\dataset\feats"
genfeatures(imgpath, bcdpath, featpath)
gen_bcd_features(imgpath, bcdpath, featpath)
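Once gen_bcd_features has run, each barcode's pickle can be read back the same way feat_analysisi.py consumes it. A small sketch (key names as written by stdfeat_infer above, path as in main):

import os
import pickle

featpath = r"D:\exhibition\dataset\feats"
for filename in os.listdir(featpath):
    if not filename.endswith(".pickle"):
        continue
    with open(os.path.join(featpath, filename), 'rb') as f:
        bpDict = pickle.load(f)
    # keys written per barcode: barcode, imgpaths, feats_ft32, feats_ft16, feats_uint8
    print(bpDict["barcode"], bpDict["feats_ft32"].shape)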

View File

@@ -325,7 +325,7 @@ def one2n_deleted(all_list):
def one2n_return(all_list, basepath):
def one2n_return(all_list):
corrpairs, corr_similarity, errpairs, err_similarity = [], [], [], []
for s_list in all_list:
@@ -410,7 +410,7 @@ def test_rpath_return():
savepath = r'D:\DetectTracking\contrast\result'
all_list = read_returnGoods_file(return_bfile)
corrpairs, errpairs, _, _ = one2n_return(all_list, basepath)
corrpairs, errpairs, _, _ = one2n_return(all_list)
for corrpair in corrpairs:
GetoutPath = os.path.join(basepath, corrpair[0])
InputPath = os.path.join(basepath, corrpair[1])
@@ -435,7 +435,7 @@ def test_one2n():
savepath: directory where the PR curves are saved
'''
# fpath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\other' # deletedBarcode.txt
fpath = r'\\192.168.1.28\share\测试_202406\returnGoods\all' # returnGoods.txt
fpath = r'\\192.168.1.28\share\测试_202406\1108_展厅模型v800测试' # returnGoods.txt
savepath = r'\\192.168.1.28\share\测试_202406\deletedBarcode\illustration'
if os.path.isdir(fpath):
@@ -476,9 +486,9 @@ def test_one2n():
plt1.savefig(os.path.join(savepath, file+'_pr.png'))
# plt1.close()
# plt2 = showHist(err_similarity, correct_similarity)
# plt2.show()
# plt2.savefig(os.path.join(savepath, file+'_hist.png'))
plt2 = showHist(err_similarity, correct_similarity)
plt2.show()
plt2.savefig(os.path.join(savepath, file+'_hist.png'))
# plt.close()
@@ -486,7 +486,7 @@
if __name__ == '__main__':
# test_one2n()
test_rpath_return() # returnGoods.txt
test_rpath_deleted() # deleteBarcode.txt
# test_rpath_deleted() # deleteBarcode.txt
# try:

View File

@@ -49,7 +49,8 @@ from datetime import datetime
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import extract_data, read_tracking_output, read_one2one_simi, read_deletedBarcode_file
from genfeats import genfeatures, stdfeat_infer
from config import config as conf
from genfeats import model_init, genfeatures, stdfeat_infer
IMG_FORMAT = ['.bmp', '.jpg', '.jpeg', '.png']
@@ -546,10 +547,10 @@ def test_one2one():
model = model_init(conf)
'''==== 1. 生成标准特征集, 只需运行一次 ==============='''
genfeatures(stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
genfeatures(model, stdSamplePath, stdBarcodePath, stdFeaturePath, bcdSet)
print("stdFeats have generated and saved!")

View File

@@ -7,7 +7,12 @@ Created on Wed Sep 11 11:57:30 2024
"""
import os
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import sys
sys.path.append(r"D:\DetectTracking")
from tracking.utils.read_data import read_similar
def read_one2one_data(filepath):
simiList = []
@@ -85,7 +90,7 @@ def plot_pr_curve(matrix):
pass
def main():
def test_compare():
filepaths = [r"\\192.168.1.28\share\测试_202406\0913_扫A放B\0913_1\OneToOneCompare.txt",
r"\\192.168.1.28\share\测试_202406\0913_扫A放B\0913_2\OneToOneCompare.txt",
r"\\192.168.1.28\share\测试_202406\0914_扫A放B\0914_1\OneToOneCompare.txt",
@@ -99,10 +104,251 @@ def main():
plot_pr_curve(simiList)
def one2one_pr(paths):
paths = Path(paths)
evtpaths = [p for p in paths.iterdir() if p.is_dir() and len(p.name.split('_'))>=2]
events, similars = [], []
##===================================== scan-A-put-A / scan-A-put-B scenarios
one2oneAA, one2oneAB = [], []
##===================================== for the exhibition-hall 1:N case
tp_events, fn_events, fp_events, tn_events = [], [], [], []
tp_simi, fn_simi, tn_simi, fp_simi = [], [], [], []
##===================================== for the 1:n case
tpevents, fnevents, fpevents, tnevents = [], [], [], []
tpsimi, fnsimi, tnsimi, fpsimi = [], [], [], []
for path in evtpaths:
barcode = path.stem.split('_')[-1]
datapath = path.joinpath('process.data')
if not barcode.isdigit() or len(barcode)<10: continue
if not datapath.is_file(): continue
try:
SimiDict = read_similar(datapath)
except Exception as e:
    print(f"{path.stem}, Error: {e}")
    continue
one2one = SimiDict['one2one']
one2n = SimiDict['one2n']
barcodes, similars = [], []
for dt in one2one:
barcodes.append(dt['barcode'])
similars.append(dt['similar'])
if len(barcodes)!=len(similars) or len(barcodes)==0:
continue
##===================================== scan-A-put-A / scan-A-put-B scenarios
simAA = [similars[i] for i in range(len(barcodes)) if barcodes[i]==barcode]
simAB = [similars[i] for i in range(len(barcodes)) if barcodes[i]!=barcode]
one2oneAA.extend(simAA)
one2oneAB.extend(simAB)
##===================================== the following applies to the exhibition-hall 1:N case
max_idx = similars.index(max(similars))
max_sim = similars[max_idx]
# max_bcd = barcodes[max_idx]
for i in range(len(one2one)):
bcd, simi = barcodes[i], similars[i]
if bcd==barcode and simi==max_sim:
tp_simi.append(simi)
tp_events.append(path.stem)
elif bcd==barcode and simi!=max_sim:
fn_simi.append(simi)
fn_events.append(path.stem)
elif bcd!=barcode and simi!=max_sim:
tn_simi.append(simi)
tn_events.append(path.stem)
else:
fp_simi.append(simi)
fp_events.append(path.stem)
##===================================== the following applies to the 1:n case
events, evt_barcodes, evt_similars, evt_types = [], [], [], []
for dt in one2n:
events.append(dt["event"])
evt_barcodes.append(dt["barcode"])
evt_similars.append(dt["similar"])
evt_types.append(dt["type"])
if len(events)!=len(evt_barcodes) or len(evt_barcodes)!=len(evt_similars) \
    or len(evt_similars)!=len(evt_types) or len(events)==0: continue
maxsim = max(evt_similars)
for i in range(len(one2n)):
bcd, simi = evt_barcodes[i], evt_similars[i]
if bcd==barcode and simi==maxsim:
tpsimi.append(simi)
tpevents.append(path.stem)
elif bcd==barcode and simi!=maxsim:
fnsimi.append(simi)
fnevents.append(path.stem)
elif bcd!=barcode and simi!=maxsim:
tnsimi.append(simi)
tnevents.append(path.stem)
else:
fpsimi.append(simi)
fpevents.append(path.stem)
'''Naming convention:
              1:1          1:n          1:N (exhibition)
counts:       TP_          TP           TPX
precision:    PPrecise_    PPrecise     PPreciseX
similarities:              tpsimi       tp_simi
'''
''' 1:1 result storage'''
PPrecise_, PRecall_ = [], []
NPrecise_, NRecall_ = [], []
''' 1:n result storage'''
PPrecise, PRecall = [], []
NPrecise, NRecall = [], []
''' exhibition-hall 1:N result storage'''
PPreciseX, PRecallX = [], []
NPreciseX, NRecallX = [], []
Thresh = np.linspace(-0.2, 1, 100)
for th in Thresh:
'''============================= 1:1'''
TP_ = sum(np.array(one2oneAA) >= th)
FP_ = sum(np.array(one2oneAB) >= th)
FN_ = sum(np.array(one2oneAA) < th)
TN_ = sum(np.array(one2oneAB) < th)
PPrecise_.append(TP_/(TP_+FP_+1e-6))
PRecall_.append(TP_/(TP_+FN_+1e-6))
NPrecise_.append(TN_/(TN_+FN_+1e-6))
NRecall_.append(TN_/(TN_+FP_+1e-6))
'''============================= 1:n'''
TP = sum(np.array(tpsimi) >= th)
FP = sum(np.array(fpsimi) >= th)
FN = sum(np.array(fnsimi) < th)
TN = sum(np.array(tnsimi) < th)
PPrecise.append(TP/(TP+FP+1e-6))
PRecall.append(TP/(TP+FN+1e-6))
NPrecise.append(TN/(TN+FN+1e-6))
NRecall.append(TN/(TN+FP+1e-6))
'''============================= 1:N (exhibition hall)'''
TPX = sum(np.array(tp_simi) >= th)
FPX = sum(np.array(fp_simi) >= th)
FNX = sum(np.array(fn_simi) < th)
TNX = sum(np.array(tn_simi) < th)
PPreciseX.append(TPX/(TPX+FPX+1e-6))
PRecallX.append(TPX/(TPX+FNX+1e-6))
NPreciseX.append(TNX/(TNX+FNX+1e-6))
NRecallX.append(TNX/(TNX+FPX+1e-6))
'''============================= 1:1 curves'''
fig, ax = plt.subplots()
ax.plot(Thresh, PPrecise_, 'r', label='Precise_Pos: TP/TPFP')
ax.plot(Thresh, PRecall_, 'b', label='Recall_Pos: TP/TPFN')
ax.plot(Thresh, NPrecise_, 'g', label='Precise_Neg: TN/TNFN')
ax.plot(Thresh, NRecall_, 'c', label='Recall_Neg: TN/TNFP')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('Precision & Recall')
ax.set_xlabel(f"Num: {len(evtpaths)}")
ax.legend()
plt.show()
'''============================= 1:1 histograms'''
fig, axes = plt.subplots(2, 1)
axes[0].hist(np.array(one2oneAA), bins=60, edgecolor='black')
axes[0].set_xlim([-0.2, 1])
axes[0].set_title('AA')
axes[1].hist(np.array(one2oneAB), bins=60, edgecolor='black')
axes[1].set_xlim([-0.2, 1])
axes[1].set_title('AB')
plt.show()
'''============================= 1:n curves'''
fig, ax = plt.subplots()
ax.plot(Thresh, PPrecise, 'r', label='Precise_Pos: TP/TPFP')
ax.plot(Thresh, PRecall, 'b', label='Recall_Pos: TP/TPFN')
ax.plot(Thresh, NPrecise, 'g', label='Precise_Neg: TN/TNFN')
ax.plot(Thresh, NRecall, 'c', label='Recall_Neg: TN/TNFP')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('Precision & Recall')
ax.set_xlabel(f"Num: {len(evtpaths)}")
ax.legend()
plt.show()
'''============================= 1:n histograms'''
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tpsimi, bins=60, edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 1].hist(fpsimi, bins=60, edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[1, 0].hist(tnsimi, bins=60, edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 1].hist(fnsimi, bins=60, edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
plt.show()
'''============================= 1:N (exhibition hall) curves'''
fig, ax = plt.subplots()
ax.plot(Thresh, PPreciseX, 'r', label='Precise_Pos: TP/TPFP')
ax.plot(Thresh, PRecallX, 'b', label='Recall_Pos: TP/TPFN')
ax.plot(Thresh, NPreciseX, 'g', label='Precise_Neg: TN/TNFN')
ax.plot(Thresh, NRecallX, 'c', label='Recall_Neg: TN/TNFP')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ax.grid(True)
ax.set_title('Precision & Recall')
ax.set_xlabel(f"Num: {len(evtpaths)}")
ax.legend()
plt.show()
'''============================= 1:N (exhibition hall) histograms'''
fig, axes = plt.subplots(2, 2)
axes[0, 0].hist(tp_simi, bins=60, edgecolor='black')
axes[0, 0].set_xlim([-0.2, 1])
axes[0, 0].set_title('TP')
axes[0, 1].hist(fp_simi, bins=60, edgecolor='black')
axes[0, 1].set_xlim([-0.2, 1])
axes[0, 1].set_title('FP')
axes[1, 0].hist(tn_simi, bins=60, edgecolor='black')
axes[1, 0].set_xlim([-0.2, 1])
axes[1, 0].set_title('TN')
axes[1, 1].hist(fn_simi, bins=60, edgecolor='black')
axes[1, 1].set_xlim([-0.2, 1])
axes[1, 1].set_title('FN')
plt.show()
print('Done!')
if __name__ == "__main__":
main()
evtpaths = r"\\192.168.1.28\share\测试视频数据以及日志\各模块测试记录\展厅测试\1120_展厅模型v801测试\扫A放A"
one2one_pr(evtpaths)
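The three threshold sweeps in one2one_pr share one pattern: count similarities above and below each threshold in the relevant lists, then derive the four precision/recall curves. A compact restatement as a helper (a hedged refactor sketch, not code from this commit):

import numpy as np

def pr_curves(tp_sims, fp_sims, fn_sims=None, tn_sims=None,
              thresh=np.linspace(-0.2, 1, 100), eps=1e-6):
    # 1:1 passes two lists (AA, AB); 1:n and 1:N pass the four
    # per-class similarity lists collected in the loop above.
    fn_sims = tp_sims if fn_sims is None else fn_sims
    tn_sims = fp_sims if tn_sims is None else tn_sims
    PP, PR, NP, NR = [], [], [], []
    for th in thresh:
        TP = np.sum(np.asarray(tp_sims) >= th)
        FP = np.sum(np.asarray(fp_sims) >= th)
        FN = np.sum(np.asarray(fn_sims) < th)
        TN = np.sum(np.asarray(tn_sims) < th)
        PP.append(TP / (TP + FP + eps))   # positive precision
        PR.append(TP / (TP + FN + eps))   # positive recall
        NP.append(TN / (TN + FN + eps))   # negative precision
        NR.append(TN / (TN + FP + eps))   # negative recall
    return PP, PR, NP, NR

# pr_curves(one2oneAA, one2oneAB)              reproduces the 1:1 sweep
# pr_curves(tpsimi, fpsimi, fnsimi, tnsimi)    reproduces the 1:n sweep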